
Linux/net/ipv4/tcp.c

  1 /*
  2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
  3  *              operating system.  INET is implemented using the  BSD Socket
  4  *              interface as the means of communication with the user level.
  5  *
  6  *              Implementation of the Transmission Control Protocol(TCP).
  7  *
  8  * Authors:     Ross Biro
  9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 10  *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 11  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 12  *              Florian La Roche, <flla@stud.uni-sb.de>
 13  *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 14  *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 15  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 16  *              Matthew Dillon, <dillon@apollo.west.oic.com>
 17  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 18  *              Jorge Cwik, <jorge@laser.satlink.net>
 19  *
 20  * Fixes:
 21  *              Alan Cox        :       Numerous verify_area() calls
 22  *              Alan Cox        :       Set the ACK bit on a reset
 23  *              Alan Cox        :       Stopped it crashing if it closed while
 24  *                                      sk->inuse=1 and was trying to connect
 25  *                                      (tcp_err()).
 26  *              Alan Cox        :       All icmp error handling was broken
 27  *                                      pointers passed were wrong and the
 28  *                                      socket was looked up backwards. Nobody
 29  *                                      tested any icmp error code obviously.
 30  *              Alan Cox        :       tcp_err() now handled properly. It
 31  *                                      wakes people on errors. poll
 32  *                                      behaves and the icmp error race
 33  *                                      has gone by moving it into sock.c
 34  *              Alan Cox        :       tcp_send_reset() fixed to work for
 35  *                                      everything not just packets for
 36  *                                      unknown sockets.
 37  *              Alan Cox        :       tcp option processing.
 38  *              Alan Cox        :       Reset tweaked (still not 100%) [Had
 39  *                                      syn rule wrong]
 40  *              Herp Rosmanith  :       More reset fixes
 41  *              Alan Cox        :       No longer acks invalid rst frames.
 42  *                                      Acking any kind of RST is right out.
 43  *              Alan Cox        :       Sets an ignore me flag on an rst
 44  *                                      receive otherwise odd bits of prattle
 45  *                                      escape still
 46  *              Alan Cox        :       Fixed another acking RST frame bug.
 47  *                                      Should stop LAN workplace lockups.
 48  *              Alan Cox        :       Some tidyups using the new skb list
 49  *                                      facilities
 50  *              Alan Cox        :       sk->keepopen now seems to work
 51  *              Alan Cox        :       Pulls options out correctly on accepts
 52  *              Alan Cox        :       Fixed assorted sk->rqueue->next errors
 53  *              Alan Cox        :       PSH doesn't end a TCP read. Switched a
 54  *                                      bit to skb ops.
 55  *              Alan Cox        :       Tidied tcp_data to avoid a potential
 56  *                                      nasty.
 57  *              Alan Cox        :       Added some better commenting, as the
 58  *                                      tcp is hard to follow
 59  *              Alan Cox        :       Removed incorrect check for 20 * psh
 60  *      Michael O'Reilly        :       ack < copied bug fix.
 61  *      Johannes Stille         :       Misc tcp fixes (not all in yet).
 62  *              Alan Cox        :       FIN with no memory -> CRASH
 63  *              Alan Cox        :       Added socket option proto entries.
 64  *                                      Also added awareness of them to accept.
 65  *              Alan Cox        :       Added TCP options (SOL_TCP)
 66  *              Alan Cox        :       Switched wakeup calls to callbacks,
 67  *                                      so the kernel can layer network
 68  *                                      sockets.
 69  *              Alan Cox        :       Use ip_tos/ip_ttl settings.
 70  *              Alan Cox        :       Handle FIN (more) properly (we hope).
 71  *              Alan Cox        :       RST frames sent on unsynchronised
 72  *                                      state ack error.
 73  *              Alan Cox        :       Put in missing check for SYN bit.
 74  *              Alan Cox        :       Added tcp_select_window() aka NET2E
 75  *                                      window non shrink trick.
 76  *              Alan Cox        :       Added a couple of small NET2E timer
 77  *                                      fixes
 78  *              Charles Hedrick :       TCP fixes
 79  *              Toomas Tamm     :       TCP window fixes
 80  *              Alan Cox        :       Small URG fix to rlogin ^C ack fight
 81  *              Charles Hedrick :       Rewrote most of it to actually work
 82  *              Linus           :       Rewrote tcp_read() and URG handling
 83  *                                      completely
 84  *              Gerhard Koerting:       Fixed some missing timer handling
 85  *              Matthew Dillon  :       Reworked TCP machine states as per RFC
 86  *              Gerhard Koerting:       PC/TCP workarounds
 87  *              Adam Caldwell   :       Assorted timer/timing errors
 88  *              Matthew Dillon  :       Fixed another RST bug
 89  *              Alan Cox        :       Move to kernel side addressing changes.
 90  *              Alan Cox        :       Beginning work on TCP fastpathing
 91  *                                      (not yet usable)
 92  *              Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
 93  *              Alan Cox        :       TCP fast path debugging
 94  *              Alan Cox        :       Window clamping
 95  *              Michael Riepe   :       Bug in tcp_check()
 96  *              Matt Dillon     :       More TCP improvements and RST bug fixes
 97  *              Matt Dillon     :       Yet more small nasties removed from the
 98  *                                      TCP code (Be very nice to this man if
 99  *                                      tcp finally works 100%) 8)
100  *              Alan Cox        :       BSD accept semantics.
101  *              Alan Cox        :       Reset on closedown bug.
102  *      Peter De Schrijver      :       ENOTCONN check missing in tcp_sendto().
103  *              Michael Pall    :       Handle poll() after URG properly in
104  *                                      all cases.
105  *              Michael Pall    :       Undo the last fix in tcp_read_urg()
106  *                                      (multi URG PUSH broke rlogin).
107  *              Michael Pall    :       Fix the multi URG PUSH problem in
108  *                                      tcp_readable(), poll() after URG
109  *                                      works now.
110  *              Michael Pall    :       recv(...,MSG_OOB) never blocks in the
111  *                                      BSD api.
112  *              Alan Cox        :       Changed the semantics of sk->socket to
113  *                                      fix a race and a signal problem with
114  *                                      accept() and async I/O.
115  *              Alan Cox        :       Relaxed the rules on tcp_sendto().
116  *              Yury Shevchuk   :       Really fixed accept() blocking problem.
117  *              Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
118  *                                      clients/servers which listen in on
119  *                                      fixed ports.
120  *              Alan Cox        :       Cleaned the above up and shrank it to
121  *                                      a sensible code size.
122  *              Alan Cox        :       Self connect lockup fix.
123  *              Alan Cox        :       No connect to multicast.
124  *              Ross Biro       :       Close unaccepted children on master
125  *                                      socket close.
126  *              Alan Cox        :       Reset tracing code.
127  *              Alan Cox        :       Spurious resets on shutdown.
128  *              Alan Cox        :       Giant 15 minute/60 second timer error
129  *              Alan Cox        :       Small whoops in polling before an
130  *                                      accept.
131  *              Alan Cox        :       Kept the state trace facility since
132  *                                      it's handy for debugging.
133  *              Alan Cox        :       More reset handler fixes.
134  *              Alan Cox        :       Started rewriting the code based on
135  *                                      the RFC's for other useful protocol
136  *                                      references see: Comer, KA9Q NOS, and
137  *                                      for a reference on the difference
138  *                                      between specifications and how BSD
139  *                                      works see the 4.4lite source.
140  *              A.N.Kuznetsov   :       Don't time wait on completion of tidy
141  *                                      close.
142  *              Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
143  *              Linus Torvalds  :       Fixed BSD port reuse to work first syn
144  *              Alan Cox        :       Reimplemented timers as per the RFC
145  *                                      and using multiple timers for sanity.
146  *              Alan Cox        :       Small bug fixes, and a lot of new
147  *                                      comments.
148  *              Alan Cox        :       Fixed dual reader crash by locking
149  *                                      the buffers (much like datagram.c)
150  *              Alan Cox        :       Fixed stuck sockets in probe. A probe
151  *                                      now gets fed up of retrying without
152  *                                      (even a no space) answer.
153  *              Alan Cox        :       Extracted closing code better
154  *              Alan Cox        :       Fixed the closing state machine to
155  *                                      resemble the RFC.
156  *              Alan Cox        :       More 'per spec' fixes.
157  *              Jorge Cwik      :       Even faster checksumming.
158  *              Alan Cox        :       tcp_data() doesn't ack illegal PSH
159  *                                      only frames. At least one pc tcp stack
160  *                                      generates them.
161  *              Alan Cox        :       Cache last socket.
162  *              Alan Cox        :       Per route irtt.
163  *              Matt Day        :       poll()->select() match BSD precisely on error
164  *              Alan Cox        :       New buffers
165  *              Marc Tamsky     :       Various sk->prot->retransmits and
166  *                                      sk->retransmits misupdating fixed.
167  *                                      Fixed tcp_write_timeout: stuck close,
168  *                                      and TCP syn retries gets used now.
169  *              Mark Yarvis     :       In tcp_read_wakeup(), don't send an
170  *                                      ack if state is TCP_CLOSED.
171  *              Alan Cox        :       Look up device on a retransmit - routes may
172  *                                      change. Doesn't yet cope with MSS shrink right
173  *                                      but it's a start!
174  *              Marc Tamsky     :       Closing in closing fixes.
175  *              Mike Shaver     :       RFC1122 verifications.
176  *              Alan Cox        :       rcv_saddr errors.
177  *              Alan Cox        :       Block double connect().
178  *              Alan Cox        :       Small hooks for enSKIP.
179  *              Alexey Kuznetsov:       Path MTU discovery.
180  *              Alan Cox        :       Support soft errors.
181  *              Alan Cox        :       Fix MTU discovery pathological case
182  *                                      when the remote claims no mtu!
183  *              Marc Tamsky     :       TCP_CLOSE fix.
184  *              Colin (G3TNE)   :       Send a reset on syn ack replies in
185  *                                      window but wrong (fixes NT lpd problems)
186  *              Pedro Roque     :       Better TCP window handling, delayed ack.
187  *              Joerg Reuter    :       No modification of locked buffers in
188  *                                      tcp_do_retransmit()
189  *              Eric Schenk     :       Changed receiver side silly window
190  *                                      avoidance algorithm to BSD style
191  *                                      algorithm. This doubles throughput
192  *                                      against machines running Solaris,
193  *                                      and seems to result in general
194  *                                      improvement.
195  *      Stefan Magdalinski      :       adjusted tcp_readable() to fix FIONREAD
196  *      Willy Konynenberg       :       Transparent proxying support.
197  *      Mike McLagan            :       Routing by source
198  *              Keith Owens     :       Do proper merging with partial SKB's in
199  *                                      tcp_do_sendmsg to avoid burstiness.
200  *              Eric Schenk     :       Fix fast close down bug with
201  *                                      shutdown() followed by close().
202  *              Andi Kleen      :       Make poll agree with SIGIO
203  *      Salvatore Sanfilippo    :       Support SO_LINGER with linger == 1 and
204  *                                      lingertime == 0 (RFC 793 ABORT Call)
205  *      Hirokazu Takahashi      :       Use copy_from_user() instead of
206  *                                      csum_and_copy_from_user() if possible.
207  *
208  *              This program is free software; you can redistribute it and/or
209  *              modify it under the terms of the GNU General Public License
210  *              as published by the Free Software Foundation; either version
211  *              2 of the License, or(at your option) any later version.
212  *
213  * Description of States:
214  *
215  *      TCP_SYN_SENT            sent a connection request, waiting for ack
216  *
217  *      TCP_SYN_RECV            received a connection request, sent ack,
218  *                              waiting for final ack in three-way handshake.
219  *
220  *      TCP_ESTABLISHED         connection established
221  *
222  *      TCP_FIN_WAIT1           our side has shutdown, waiting to complete
223  *                              transmission of remaining buffered data
224  *
225  *      TCP_FIN_WAIT2           all buffered data sent, waiting for remote
226  *                              to shutdown
227  *
228  *      TCP_CLOSING             both sides have shutdown but we still have
229  *                              data we have to finish sending
230  *
231  *      TCP_TIME_WAIT           timeout to catch resent junk before entering
232  *                              closed, can only be entered from FIN_WAIT2
233  *                              or CLOSING.  Required because the other end
234  *                              may not have gotten our last ACK causing it
235  *                              to retransmit the data packet (which we ignore)
236  *
237  *      TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
238  *                              us to finish writing our data and to shutdown
239  *                              (we have to close() to move on to LAST_ACK)
240  *
241  *      TCP_LAST_ACK            our side has shutdown after remote has
242  *                              shutdown.  There may still be data in our
243  *                              buffer that we have to finish sending
244  *
245  *      TCP_CLOSE               socket is finished
246  */
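
The states listed above can be observed from user space through the TCP_INFO
socket option. A minimal sketch (the helper name is made up for illustration;
it assumes a Linux host with glibc's <netinet/tcp.h> and an already-created
TCP socket descriptor):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>        /* TCP_INFO, struct tcp_info */

    /* Print the kernel's view of this connection's state. */
    static void print_tcp_state(int fd)
    {
            struct tcp_info info;
            socklen_t len = sizeof(info);

            if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
                    printf("tcpi_state = %u\n", info.tcpi_state);
            else
                    perror("getsockopt(TCP_INFO)");
    }

tcpi_state carries the same TCP_ESTABLISHED, TCP_FIN_WAIT1, ... values that
the state descriptions above refer to.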
247 
248 #define pr_fmt(fmt) "TCP: " fmt
249 
250 #include <linux/kernel.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/fs.h>
257 #include <linux/skbuff.h>
258 #include <linux/scatterlist.h>
259 #include <linux/splice.h>
260 #include <linux/net.h>
261 #include <linux/socket.h>
262 #include <linux/random.h>
263 #include <linux/bootmem.h>
264 #include <linux/highmem.h>
265 #include <linux/swap.h>
266 #include <linux/cache.h>
267 #include <linux/err.h>
268 #include <linux/crypto.h>
269 #include <linux/time.h>
270 #include <linux/slab.h>
271 
272 #include <net/icmp.h>
273 #include <net/inet_common.h>
274 #include <net/tcp.h>
275 #include <net/xfrm.h>
276 #include <net/ip.h>
277 #include <net/netdma.h>
278 #include <net/sock.h>
279 
280 #include <asm/uaccess.h>
281 #include <asm/ioctls.h>
282 #include <net/busy_poll.h>
283 
284 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
285 
286 int sysctl_tcp_min_tso_segs __read_mostly = 2;
287 
288 int sysctl_tcp_autocorking __read_mostly = 1;
289 
290 struct percpu_counter tcp_orphan_count;
291 EXPORT_SYMBOL_GPL(tcp_orphan_count);
292 
293 long sysctl_tcp_mem[3] __read_mostly;
294 int sysctl_tcp_wmem[3] __read_mostly;
295 int sysctl_tcp_rmem[3] __read_mostly;
296 
297 EXPORT_SYMBOL(sysctl_tcp_mem);
298 EXPORT_SYMBOL(sysctl_tcp_rmem);
299 EXPORT_SYMBOL(sysctl_tcp_wmem);
300 
301 atomic_long_t tcp_memory_allocated;     /* Current allocated memory. */
302 EXPORT_SYMBOL(tcp_memory_allocated);
303 
304 /*
305  * Current number of TCP sockets.
306  */
307 struct percpu_counter tcp_sockets_allocated;
308 EXPORT_SYMBOL(tcp_sockets_allocated);
309 
310 /*
311  * TCP splice context
312  */
313 struct tcp_splice_state {
314         struct pipe_inode_info *pipe;
315         size_t len;
316         unsigned int flags;
317 };
318 
319 /*
320  * Pressure flag: try to collapse.
321  * Technical note: it is used by multiple contexts non atomically.
322  * All the __sk_mem_schedule() is of this nature: accounting
323  * is strict, actions are advisory and have some latency.
324  */
325 int tcp_memory_pressure __read_mostly;
326 EXPORT_SYMBOL(tcp_memory_pressure);
327 
328 void tcp_enter_memory_pressure(struct sock *sk)
329 {
330         if (!tcp_memory_pressure) {
331                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
332                 tcp_memory_pressure = 1;
333         }
334 }
335 EXPORT_SYMBOL(tcp_enter_memory_pressure);
336 
337 /* Convert seconds to retransmits based on initial and max timeout */
338 static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
339 {
340         u8 res = 0;
341 
342         if (seconds > 0) {
343                 int period = timeout;
344 
345                 res = 1;
346                 while (seconds > period && res < 255) {
347                         res++;
348                         timeout <<= 1;
349                         if (timeout > rto_max)
350                                 timeout = rto_max;
351                         period += timeout;
352                 }
353         }
354         return res;
355 }
356 
357 /* Convert retransmits to seconds based on initial and max timeout */
358 static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
359 {
360         int period = 0;
361 
362         if (retrans > 0) {
363                 period = timeout;
364                 while (--retrans) {
365                         timeout <<= 1;
366                         if (timeout > rto_max)
367                                 timeout = rto_max;
368                         period += timeout;
369                 }
370         }
371         return period;
372 }
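
To make the conversion concrete, here is a small stand-alone sketch (not
kernel code; it restates the doubling loop of retrans_to_secs() with
illustrative plain units: an initial timeout of 1 second and an rto_max cap
of 120 seconds):

    #include <stdio.h>

    int main(void)
    {
            int timeout = 1, rto_max = 120, period = timeout;
            int retrans;

            /* period accumulates 1 + 2 + 4 + 8 + ... capped at rto_max */
            for (retrans = 2; retrans <= 8; retrans++) {
                    timeout <<= 1;
                    if (timeout > rto_max)
                            timeout = rto_max;
                    period += timeout;
                    printf("%d retransmits cover ~%d seconds\n", retrans, period);
            }
            return 0;       /* e.g. 5 retransmits cover ~31 seconds */
    }

secs_to_retrans() is simply the inverse mapping: given a time budget, it
counts how many doublings fit before the budget is exceeded.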
373 
374 /* Address-family independent initialization for a tcp_sock.
375  *
 376  * NOTE: A lot of things are set to zero explicitly by the call to
 377  *       sk_alloc(), so they need not be done here.
378  */
379 void tcp_init_sock(struct sock *sk)
380 {
381         struct inet_connection_sock *icsk = inet_csk(sk);
382         struct tcp_sock *tp = tcp_sk(sk);
383 
384         __skb_queue_head_init(&tp->out_of_order_queue);
385         tcp_init_xmit_timers(sk);
386         tcp_prequeue_init(tp);
387         INIT_LIST_HEAD(&tp->tsq_node);
388 
389         icsk->icsk_rto = TCP_TIMEOUT_INIT;
390         tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
391 
392         /* So many TCP implementations out there (incorrectly) count the
393          * initial SYN frame in their delayed-ACK and congestion control
394          * algorithms that we must have the following bandaid to talk
395          * efficiently to them.  -DaveM
396          */
397         tp->snd_cwnd = TCP_INIT_CWND;
398 
399         /* See draft-stevens-tcpca-spec-01 for discussion of the
400          * initialization of these values.
401          */
402         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
403         tp->snd_cwnd_clamp = ~0;
404         tp->mss_cache = TCP_MSS_DEFAULT;
405 
406         tp->reordering = sysctl_tcp_reordering;
407         tcp_enable_early_retrans(tp);
408         icsk->icsk_ca_ops = &tcp_init_congestion_ops;
409 
410         tp->tsoffset = 0;
411 
412         sk->sk_state = TCP_CLOSE;
413 
414         sk->sk_write_space = sk_stream_write_space;
415         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
416 
417         icsk->icsk_sync_mss = tcp_sync_mss;
418 
419         sk->sk_sndbuf = sysctl_tcp_wmem[1];
420         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
421 
422         local_bh_disable();
423         sock_update_memcg(sk);
424         sk_sockets_allocated_inc(sk);
425         local_bh_enable();
426 }
427 EXPORT_SYMBOL(tcp_init_sock);
428 
429 /*
430  *      Wait for a TCP event.
431  *
432  *      Note that we don't need to lock the socket, as the upper poll layers
433  *      take care of normal races (between the test and the event) and we don't
434  *      go look at any of the socket buffers directly.
435  */
436 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
437 {
438         unsigned int mask;
439         struct sock *sk = sock->sk;
440         const struct tcp_sock *tp = tcp_sk(sk);
441 
442         sock_rps_record_flow(sk);
443 
444         sock_poll_wait(file, sk_sleep(sk), wait);
445         if (sk->sk_state == TCP_LISTEN)
446                 return inet_csk_listen_poll(sk);
447 
448         /* Socket is not locked. We are protected from async events
449          * by poll logic and correct handling of state changes
450          * made by other threads is impossible in any case.
451          */
452 
453         mask = 0;
454 
455         /*
456          * POLLHUP is certainly not done right. But poll() doesn't
457          * have a notion of HUP in just one direction, and for a
458          * socket the read side is more interesting.
459          *
460          * Some poll() documentation says that POLLHUP is incompatible
461          * with the POLLOUT/POLLWR flags, so somebody should check this
462          * all. But careful, it tends to be safer to return too many
463          * bits than too few, and you can easily break real applications
464          * if you don't tell them that something has hung up!
465          *
466          * Check-me.
467          *
468          * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
469          * our fs/select.c). It means that after we received EOF,
 470  * poll always returns immediately, making poll() on write() impossible
471          * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
472          * if and only if shutdown has been made in both directions.
473          * Actually, it is interesting to look how Solaris and DUX
474          * solve this dilemma. I would prefer, if POLLHUP were maskable,
475          * then we could set it on SND_SHUTDOWN. BTW examples given
476          * in Stevens' books assume exactly this behaviour, it explains
477          * why POLLHUP is incompatible with POLLOUT.    --ANK
478          *
479          * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
480          * blocking on fresh not-connected or disconnected socket. --ANK
481          */
482         if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
483                 mask |= POLLHUP;
484         if (sk->sk_shutdown & RCV_SHUTDOWN)
485                 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
486 
487         /* Connected or passive Fast Open socket? */
488         if (sk->sk_state != TCP_SYN_SENT &&
489             (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
490                 int target = sock_rcvlowat(sk, 0, INT_MAX);
491 
492                 if (tp->urg_seq == tp->copied_seq &&
493                     !sock_flag(sk, SOCK_URGINLINE) &&
494                     tp->urg_data)
495                         target++;
496 
 497                 /* Potential race condition. If the read of tp below is
 498                  * reordered above the read of sk->sk_state, we can be
 499                  * illegally awakened in SYN_* states. */
500                 if (tp->rcv_nxt - tp->copied_seq >= target)
501                         mask |= POLLIN | POLLRDNORM;
502 
503                 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
504                         if (sk_stream_is_writeable(sk)) {
505                                 mask |= POLLOUT | POLLWRNORM;
506                         } else {  /* send SIGIO later */
507                                 set_bit(SOCK_ASYNC_NOSPACE,
508                                         &sk->sk_socket->flags);
509                                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
510 
511                                 /* Race breaker. If space is freed after
512                                  * wspace test but before the flags are set,
513                                  * IO signal will be lost.
514                                  */
515                                 if (sk_stream_is_writeable(sk))
516                                         mask |= POLLOUT | POLLWRNORM;
517                         }
518                 } else
519                         mask |= POLLOUT | POLLWRNORM;
520 
521                 if (tp->urg_data & TCP_URG_VALID)
522                         mask |= POLLPRI;
523         }
524         /* This barrier is coupled with smp_wmb() in tcp_reset() */
525         smp_rmb();
526         if (sk->sk_err)
527                 mask |= POLLERR;
528 
529         return mask;
530 }
531 EXPORT_SYMBOL(tcp_poll);
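
The mask computed by tcp_poll() is what poll(2) reports to applications. A
minimal user-space sketch (helper name made up for illustration; assumes a
connected TCP socket fd) that distinguishes the main event bits:

    #define _GNU_SOURCE             /* for POLLRDHUP */
    #include <poll.h>
    #include <stdio.h>

    /* Wait up to one second for events on a connected TCP socket. */
    static void wait_for_events(int fd)
    {
            struct pollfd pfd = { .fd = fd,
                                  .events = POLLIN | POLLOUT | POLLRDHUP };

            if (poll(&pfd, 1, 1000) > 0) {
                    if (pfd.revents & POLLIN)
                            printf("data (or a FIN) is ready to read\n");
                    if (pfd.revents & POLLOUT)
                            printf("send buffer has space\n");
                    if (pfd.revents & POLLRDHUP)
                            printf("peer shut down its sending side\n");
                    if (pfd.revents & (POLLERR | POLLHUP))
                            printf("error or full hangup\n");
            }
    }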
532 
533 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
534 {
535         struct tcp_sock *tp = tcp_sk(sk);
536         int answ;
537         bool slow;
538 
539         switch (cmd) {
540         case SIOCINQ:
541                 if (sk->sk_state == TCP_LISTEN)
542                         return -EINVAL;
543 
544                 slow = lock_sock_fast(sk);
545                 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
546                         answ = 0;
547                 else if (sock_flag(sk, SOCK_URGINLINE) ||
548                          !tp->urg_data ||
549                          before(tp->urg_seq, tp->copied_seq) ||
550                          !before(tp->urg_seq, tp->rcv_nxt)) {
551 
552                         answ = tp->rcv_nxt - tp->copied_seq;
553 
554                         /* Subtract 1, if FIN was received */
555                         if (answ && sock_flag(sk, SOCK_DONE))
556                                 answ--;
557                 } else
558                         answ = tp->urg_seq - tp->copied_seq;
559                 unlock_sock_fast(sk, slow);
560                 break;
561         case SIOCATMARK:
562                 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
563                 break;
564         case SIOCOUTQ:
565                 if (sk->sk_state == TCP_LISTEN)
566                         return -EINVAL;
567 
568                 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
569                         answ = 0;
570                 else
571                         answ = tp->write_seq - tp->snd_una;
572                 break;
573         case SIOCOUTQNSD:
574                 if (sk->sk_state == TCP_LISTEN)
575                         return -EINVAL;
576 
577                 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
578                         answ = 0;
579                 else
580                         answ = tp->write_seq - tp->snd_nxt;
581                 break;
582         default:
583                 return -ENOIOCTLCMD;
584         }
585 
586         return put_user(answ, (int __user *)arg);
587 }
588 EXPORT_SYMBOL(tcp_ioctl);
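
The same queries are available from user space via ioctl(2). A minimal sketch
(helper name is illustrative; assumes a connected TCP socket fd) reads the
unread and unacknowledged byte counts computed in the switch above:

    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/sockios.h>      /* SIOCINQ, SIOCOUTQ, SIOCOUTQNSD */

    static void show_queue_sizes(int fd)
    {
            int inq = 0, outq = 0;

            if (ioctl(fd, SIOCINQ, &inq) == 0)      /* bytes ready to read */
                    printf("unread: %d bytes\n", inq);
            if (ioctl(fd, SIOCOUTQ, &outq) == 0)    /* written, not yet acked */
                    printf("unacked: %d bytes\n", outq);
    }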
589 
590 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
591 {
592         TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
593         tp->pushed_seq = tp->write_seq;
594 }
595 
596 static inline bool forced_push(const struct tcp_sock *tp)
597 {
598         return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
599 }
600 
601 static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
602 {
603         struct tcp_sock *tp = tcp_sk(sk);
604         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
605 
606         skb->csum    = 0;
607         tcb->seq     = tcb->end_seq = tp->write_seq;
608         tcb->tcp_flags = TCPHDR_ACK;
609         tcb->sacked  = 0;
610         skb_header_release(skb);
611         tcp_add_write_queue_tail(sk, skb);
612         sk->sk_wmem_queued += skb->truesize;
613         sk_mem_charge(sk, skb->truesize);
614         if (tp->nonagle & TCP_NAGLE_PUSH)
615                 tp->nonagle &= ~TCP_NAGLE_PUSH;
616 }
617 
618 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
619 {
620         if (flags & MSG_OOB)
621                 tp->snd_up = tp->write_seq;
622 }
623 
624 /* If a not yet filled skb is pushed, do not send it if
 625  * we have data packets in Qdisc or NIC queues:
 626  * Because TX completion will happen shortly, it gives a chance
 627  * to coalesce future sendmsg() payload into this skb, without
 628  * need for a timer, and with no latency trade off.
 629  * As packets containing data payload have a bigger truesize
 630  * than pure ACK (dataless) packets, the last checks prevent
 631  * autocorking if we only have an ACK in Qdisc/NIC queues,
 632  * or if TX completion was delayed after we processed the ACK packet.
633  */
634 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
635                                 int size_goal)
636 {
637         return skb->len < size_goal &&
638                sysctl_tcp_autocorking &&
639                skb != tcp_write_queue_head(sk) &&
640                atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
641 }
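
Autocorking is governed by sysctl_tcp_autocorking, declared near the top of
this file and exposed as net.ipv4.tcp_autocorking. A minimal user-space
sketch (assuming procfs is mounted at /proc) reads the current setting:

    #include <stdio.h>

    /* Read net.ipv4.tcp_autocorking (1 = enabled, 0 = disabled). */
    int main(void)
    {
            FILE *f = fopen("/proc/sys/net/ipv4/tcp_autocorking", "r");
            int val = -1;

            if (f) {
                    if (fscanf(f, "%d", &val) == 1)
                            printf("tcp_autocorking = %d\n", val);
                    fclose(f);
            }
            return 0;
    }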
642 
643 static void tcp_push(struct sock *sk, int flags, int mss_now,
644                      int nonagle, int size_goal)
645 {
646         struct tcp_sock *tp = tcp_sk(sk);
647         struct sk_buff *skb;
648 
649         if (!tcp_send_head(sk))
650                 return;
651 
652         skb = tcp_write_queue_tail(sk);
653         if (!(flags & MSG_MORE) || forced_push(tp))
654                 tcp_mark_push(tp, skb);
655 
656         tcp_mark_urg(tp, flags);
657 
658         if (tcp_should_autocork(sk, skb, size_goal)) {
659 
660                 /* avoid atomic op if TSQ_THROTTLED bit is already set */
661                 if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
662                         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
663                         set_bit(TSQ_THROTTLED, &tp->tsq_flags);
664                 }
665                 /* It is possible TX completion already happened
666                  * before we set TSQ_THROTTLED.
667                  */
668                 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
669                         return;
670         }
671 
672         if (flags & MSG_MORE)
673                 nonagle = TCP_NAGLE_CORK;
674 
675         __tcp_push_pending_frames(sk, mss_now, nonagle);
676 }
677 
678 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
679                                 unsigned int offset, size_t len)
680 {
681         struct tcp_splice_state *tss = rd_desc->arg.data;
682         int ret;
683 
684         ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
685                               tss->flags);
686         if (ret > 0)
687                 rd_desc->count -= ret;
688         return ret;
689 }
690 
691 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
692 {
693         /* Store TCP splice context information in read_descriptor_t. */
694         read_descriptor_t rd_desc = {
695                 .arg.data = tss,
696                 .count    = tss->len,
697         };
698 
699         return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
700 }
701 
702 /**
703  *  tcp_splice_read - splice data from TCP socket to a pipe
704  * @sock:       socket to splice from
705  * @ppos:       position (not valid)
706  * @pipe:       pipe to splice to
707  * @len:        number of bytes to splice
708  * @flags:      splice modifier flags
709  *
710  * Description:
711  *    Will read pages from given socket and fill them into a pipe.
712  *
713  **/
714 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
715                         struct pipe_inode_info *pipe, size_t len,
716                         unsigned int flags)
717 {
718         struct sock *sk = sock->sk;
719         struct tcp_splice_state tss = {
720                 .pipe = pipe,
721                 .len = len,
722                 .flags = flags,
723         };
724         long timeo;
725         ssize_t spliced;
726         int ret;
727 
728         sock_rps_record_flow(sk);
729         /*
730          * We can't seek on a socket input
731          */
732         if (unlikely(*ppos))
733                 return -ESPIPE;
734 
735         ret = spliced = 0;
736 
737         lock_sock(sk);
738 
739         timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
740         while (tss.len) {
741                 ret = __tcp_splice_read(sk, &tss);
742                 if (ret < 0)
743                         break;
744                 else if (!ret) {
745                         if (spliced)
746                                 break;
747                         if (sock_flag(sk, SOCK_DONE))
748                                 break;
749                         if (sk->sk_err) {
750                                 ret = sock_error(sk);
751                                 break;
752                         }
753                         if (sk->sk_shutdown & RCV_SHUTDOWN)
754                                 break;
755                         if (sk->sk_state == TCP_CLOSE) {
756                                 /*
 757                                  * This occurs when the user tries to read
 758                                  * from a socket that was never connected.
759                                  */
760                                 if (!sock_flag(sk, SOCK_DONE))
761                                         ret = -ENOTCONN;
762                                 break;
763                         }
764                         if (!timeo) {
765                                 ret = -EAGAIN;
766                                 break;
767                         }
768                         sk_wait_data(sk, &timeo);
769                         if (signal_pending(current)) {
770                                 ret = sock_intr_errno(timeo);
771                                 break;
772                         }
773                         continue;
774                 }
775                 tss.len -= ret;
776                 spliced += ret;
777 
778                 if (!timeo)
779                         break;
780                 release_sock(sk);
781                 lock_sock(sk);
782 
783                 if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
784                     (sk->sk_shutdown & RCV_SHUTDOWN) ||
785                     signal_pending(current))
786                         break;
787         }
788 
789         release_sock(sk);
790 
791         if (spliced)
792                 return spliced;
793 
794         return ret;
795 }
796 EXPORT_SYMBOL(tcp_splice_read);
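
tcp_splice_read() is the backend used when splice(2) reads from a TCP socket.
A minimal user-space sketch (helper name is illustrative; assumes a connected
socket and an open output file descriptor) relays data through a pipe without
copying it through user memory:

    #define _GNU_SOURCE
    #include <fcntl.h>              /* splice(), SPLICE_F_MOVE */
    #include <unistd.h>

    /* Relay data from a TCP socket to a file via a pipe, 64 KB at a time,
     * until EOF or an error.
     */
    static ssize_t relay(int sockfd, int filefd)
    {
            int p[2];
            ssize_t n, total = 0;

            if (pipe(p) < 0)
                    return -1;

            while ((n = splice(sockfd, NULL, p[1], NULL, 65536,
                               SPLICE_F_MOVE)) > 0) {
                    /* Drain the pipe into the destination file. */
                    while (n > 0) {
                            ssize_t m = splice(p[0], NULL, filefd, NULL, n,
                                               SPLICE_F_MOVE);
                            if (m <= 0)
                                    goto out;
                            n -= m;
                            total += m;
                    }
            }
    out:
            close(p[0]);
            close(p[1]);
            return total;
    }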
797 
798 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
799 {
800         struct sk_buff *skb;
801 
802         /* The TCP header must be at least 32-bit aligned.  */
803         size = ALIGN(size, 4);
804 
805         skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
806         if (skb) {
807                 if (sk_wmem_schedule(sk, skb->truesize)) {
808                         skb_reserve(skb, sk->sk_prot->max_header);
809                         /*
810                          * Make sure that we have exactly size bytes
811                          * available to the caller, no more, no less.
812                          */
813                         skb->reserved_tailroom = skb->end - skb->tail - size;
814                         return skb;
815                 }
816                 __kfree_skb(skb);
817         } else {
818                 sk->sk_prot->enter_memory_pressure(sk);
819                 sk_stream_moderate_sndbuf(sk);
820         }
821         return NULL;
822 }
823 
824 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
825                                        int large_allowed)
826 {
827         struct tcp_sock *tp = tcp_sk(sk);
828         u32 xmit_size_goal, old_size_goal;
829 
830         xmit_size_goal = mss_now;
831 
832         if (large_allowed && sk_can_gso(sk)) {
833                 u32 gso_size, hlen;
834 
835                 /* Maybe we should/could use sk->sk_prot->max_header here ? */
836                 hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
837                        inet_csk(sk)->icsk_ext_hdr_len +
838                        tp->tcp_header_len;
839 
840                 /* Goal is to send at least one packet per ms,
841                  * not one big TSO packet every 100 ms.
842                  * This preserves ACK clocking and is consistent
843                  * with tcp_tso_should_defer() heuristic.
844                  */
845                 gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
846                 gso_size = max_t(u32, gso_size,
847                                  sysctl_tcp_min_tso_segs * mss_now);
848 
849                 xmit_size_goal = min_t(u32, gso_size,
850                                        sk->sk_gso_max_size - 1 - hlen);
851 
852                 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
853 
854                 /* We try hard to avoid divides here */
855                 old_size_goal = tp->xmit_size_goal_segs * mss_now;
856 
857                 if (likely(old_size_goal <= xmit_size_goal &&
858                            old_size_goal + mss_now > xmit_size_goal)) {
859                         xmit_size_goal = old_size_goal;
860                 } else {
861                         tp->xmit_size_goal_segs =
862                                 min_t(u16, xmit_size_goal / mss_now,
863                                       sk->sk_gso_max_segs);
864                         xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
865                 }
866         }
867 
868         return max(xmit_size_goal, mss_now);
869 }
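
A worked example of the pacing heuristic above, with illustrative numbers
(not kernel code; the clamping against sk_gso_max_size and the send window is
omitted): at a pacing rate of 125 MB/s (about 1 Gbit/s) and a 1448-byte MSS,
each TSO packet is sized to roughly half a millisecond of transmission:

    #include <stdio.h>

    int main(void)
    {
            unsigned int pacing_rate = 125000000;   /* bytes/sec, illustrative */
            unsigned int mss = 1448;
            unsigned int min_tso_segs = 2;          /* mirrors the sysctl default */
            unsigned int gso_size;

            gso_size = pacing_rate / (2 * 1000);    /* ~0.5 ms worth of data */
            if (gso_size < min_tso_segs * mss)
                    gso_size = min_tso_segs * mss;

            printf("size goal ~= %u bytes (~%u segments)\n",
                   gso_size, gso_size / mss);       /* ~62500 bytes, ~43 segs */
            return 0;
    }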
870 
871 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
872 {
873         int mss_now;
874 
875         mss_now = tcp_current_mss(sk);
876         *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
877 
878         return mss_now;
879 }
880 
881 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
882                                 size_t size, int flags)
883 {
884         struct tcp_sock *tp = tcp_sk(sk);
885         int mss_now, size_goal;
886         int err;
887         ssize_t copied;
888         long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
889 
890         /* Wait for a connection to finish. One exception is TCP Fast Open
891          * (passive side) where data is allowed to be sent before a connection
892          * is fully established.
893          */
894         if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
895             !tcp_passive_fastopen(sk)) {
896                 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
897                         goto out_err;
898         }
899 
900         clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
901 
902         mss_now = tcp_send_mss(sk, &size_goal, flags);
903         copied = 0;
904 
905         err = -EPIPE;
906         if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
907                 goto out_err;
908 
909         while (size > 0) {
910                 struct sk_buff *skb = tcp_write_queue_tail(sk);
911                 int copy, i;
912                 bool can_coalesce;
913 
914                 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
915 new_segment:
916                         if (!sk_stream_memory_free(sk))
917                                 goto wait_for_sndbuf;
918 
919                         skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
920                         if (!skb)
921                                 goto wait_for_memory;
922 
923                         skb_entail(sk, skb);
924                         copy = size_goal;
925                 }
926 
927                 if (copy > size)
928                         copy = size;
929 
930                 i = skb_shinfo(skb)->nr_frags;
931                 can_coalesce = skb_can_coalesce(skb, i, page, offset);
932                 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
933                         tcp_mark_push(tp, skb);
934                         goto new_segment;
935                 }
936                 if (!sk_wmem_schedule(sk, copy))
937                         goto wait_for_memory;
938 
939                 if (can_coalesce) {
940                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
941                 } else {
942                         get_page(page);
943                         skb_fill_page_desc(skb, i, page, offset, copy);
944                 }
945                 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
946 
947                 skb->len += copy;
948                 skb->data_len += copy;
949                 skb->truesize += copy;
950                 sk->sk_wmem_queued += copy;
951                 sk_mem_charge(sk, copy);
952                 skb->ip_summed = CHECKSUM_PARTIAL;
953                 tp->write_seq += copy;
954                 TCP_SKB_CB(skb)->end_seq += copy;
955                 skb_shinfo(skb)->gso_segs = 0;
956 
957                 if (!copied)
958                         TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
959 
960                 copied += copy;
961                 offset += copy;
962                 if (!(size -= copy))
963                         goto out;
964 
965                 if (skb->len < size_goal || (flags & MSG_OOB))
966                         continue;
967 
968                 if (forced_push(tp)) {
969                         tcp_mark_push(tp, skb);
970                         __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
971                 } else if (skb == tcp_send_head(sk))
972                         tcp_push_one(sk, mss_now);
973                 continue;
974 
975 wait_for_sndbuf:
976                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
977 wait_for_memory:
978                 tcp_push(sk, flags & ~MSG_MORE, mss_now,
979                          TCP_NAGLE_PUSH, size_goal);
980 
981                 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
982                         goto do_error;
983 
984                 mss_now = tcp_send_mss(sk, &size_goal, flags);
985         }
986 
987 out:
988         if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
989                 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
990         return copied;
991 
992 do_error:
993         if (copied)
994                 goto out;
995 out_err:
996         return sk_stream_error(sk, flags, err);
997 }
998 
999 int tcp_sendpage(struct sock *sk, struct page *page, int offset,
1000                  size_t size, int flags)
1001 {
1002         ssize_t res;
1003 
1004         if (!(sk->sk_route_caps & NETIF_F_SG) ||
1005             !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
1006                 return sock_no_sendpage(sk->sk_socket, page, offset, size,
1007                                         flags);
1008 
1009         lock_sock(sk);
1010         res = do_tcp_sendpages(sk, page, offset, size, flags);
1011         release_sock(sk);
1012         return res;
1013 }
1014 EXPORT_SYMBOL(tcp_sendpage);
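
The usual way to reach tcp_sendpage() from user space is sendfile(2). A
minimal sketch (helper name is illustrative; assumes an open regular file and
a connected TCP socket) sends a whole file without copying it through user
buffers:

    #include <sys/sendfile.h>
    #include <sys/stat.h>

    /* Returns bytes sent, or -1 on error before anything was sent. */
    static ssize_t send_file(int sockfd, int filefd)
    {
            struct stat st;
            off_t off = 0;
            ssize_t sent = 0, n;

            if (fstat(filefd, &st) < 0)
                    return -1;

            while (off < st.st_size) {
                    n = sendfile(sockfd, filefd, &off, st.st_size - off);
                    if (n <= 0)
                            return sent ? sent : -1;
                    sent += n;
            }
            return sent;
    }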
1015 
1016 static inline int select_size(const struct sock *sk, bool sg)
1017 {
1018         const struct tcp_sock *tp = tcp_sk(sk);
1019         int tmp = tp->mss_cache;
1020 
1021         if (sg) {
1022                 if (sk_can_gso(sk)) {
1023                         /* Small frames won't use a full page:
1024                          * Payload will immediately follow tcp header.
1025                          */
1026                         tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
1027                 } else {
1028                         int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
1029 
1030                         if (tmp >= pgbreak &&
1031                             tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
1032                                 tmp = pgbreak;
1033                 }
1034         }
1035 
1036         return tmp;
1037 }
1038 
1039 void tcp_free_fastopen_req(struct tcp_sock *tp)
1040 {
1041         if (tp->fastopen_req != NULL) {
1042                 kfree(tp->fastopen_req);
1043                 tp->fastopen_req = NULL;
1044         }
1045 }
1046 
1047 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1048                                 int *copied, size_t size)
1049 {
1050         struct tcp_sock *tp = tcp_sk(sk);
1051         int err, flags;
1052 
1053         if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1054                 return -EOPNOTSUPP;
1055         if (tp->fastopen_req != NULL)
1056                 return -EALREADY; /* Another Fast Open is in progress */
1057 
1058         tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1059                                    sk->sk_allocation);
1060         if (unlikely(tp->fastopen_req == NULL))
1061                 return -ENOBUFS;
1062         tp->fastopen_req->data = msg;
1063         tp->fastopen_req->size = size;
1064 
1065         flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1066         err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1067                                     msg->msg_namelen, flags);
1068         *copied = tp->fastopen_req->copied;
1069         tcp_free_fastopen_req(tp);
1070         return err;
1071 }
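
On the client side this path is reached by passing MSG_FASTOPEN to sendto(2)
instead of calling connect() first. A minimal sketch (helper name is
illustrative; assumes the net.ipv4.tcp_fastopen sysctl has the client bit set
and that addr, buf and len are prepared by the caller):

    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <unistd.h>

    #ifndef MSG_FASTOPEN
    #define MSG_FASTOPEN 0x20000000        /* older libcs may lack the define */
    #endif

    /* Carry data in the SYN; the kernel falls back to a normal handshake
     * when no valid Fast Open cookie is cached for this server yet.
     */
    static int fastopen_connect_send(const struct sockaddr_in *addr,
                                     const void *buf, size_t len)
    {
            int fd = socket(AF_INET, SOCK_STREAM, 0);

            if (fd < 0)
                    return -1;
            if (sendto(fd, buf, len, MSG_FASTOPEN,
                       (const struct sockaddr *)addr, sizeof(*addr)) < 0) {
                    close(fd);
                    return -1;
            }
            return fd;      /* caller continues with send()/recv() as usual */
    }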
1072 
1073 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1074                 size_t size)
1075 {
1076         struct iovec *iov;
1077         struct tcp_sock *tp = tcp_sk(sk);
1078         struct sk_buff *skb;
1079         int iovlen, flags, err, copied = 0;
1080         int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
1081         bool sg;
1082         long timeo;
1083 
1084         lock_sock(sk);
1085 
1086         flags = msg->msg_flags;
1087         if (flags & MSG_FASTOPEN) {
1088                 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1089                 if (err == -EINPROGRESS && copied_syn > 0)
1090                         goto out;
1091                 else if (err)
1092                         goto out_err;
1093                 offset = copied_syn;
1094         }
1095 
1096         timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1097 
1098         /* Wait for a connection to finish. One exception is TCP Fast Open
1099          * (passive side) where data is allowed to be sent before a connection
1100          * is fully established.
1101          */
1102         if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1103             !tcp_passive_fastopen(sk)) {
1104                 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
1105                         goto do_error;
1106         }
1107 
1108         if (unlikely(tp->repair)) {
1109                 if (tp->repair_queue == TCP_RECV_QUEUE) {
1110                         copied = tcp_send_rcvq(sk, msg, size);
1111                         goto out_nopush;
1112                 }
1113 
1114                 err = -EINVAL;
1115                 if (tp->repair_queue == TCP_NO_QUEUE)
1116                         goto out_err;
1117 
1118                 /* 'common' sending to sendq */
1119         }
1120 
1121         /* This should be in poll */
1122         clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1123 
1124         mss_now = tcp_send_mss(sk, &size_goal, flags);
1125 
1126         /* Ok commence sending. */
1127         iovlen = msg->msg_iovlen;
1128         iov = msg->msg_iov;
1129         copied = 0;
1130 
1131         err = -EPIPE;
1132         if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1133                 goto out_err;
1134 
1135         sg = !!(sk->sk_route_caps & NETIF_F_SG);
1136 
1137         while (--iovlen >= 0) {
1138                 size_t seglen = iov->iov_len;
1139                 unsigned char __user *from = iov->iov_base;
1140 
1141                 iov++;
1142                 if (unlikely(offset > 0)) {  /* Skip bytes copied in SYN */
1143                         if (offset >= seglen) {
1144                                 offset -= seglen;
1145                                 continue;
1146                         }
1147                         seglen -= offset;
1148                         from += offset;
1149                         offset = 0;
1150                 }
1151 
1152                 while (seglen > 0) {
1153                         int copy = 0;
1154                         int max = size_goal;
1155 
1156                         skb = tcp_write_queue_tail(sk);
1157                         if (tcp_send_head(sk)) {
1158                                 if (skb->ip_summed == CHECKSUM_NONE)
1159                                         max = mss_now;
1160                                 copy = max - skb->len;
1161                         }
1162 
1163                         if (copy <= 0) {
1164 new_segment:
1165                                 /* Allocate new segment. If the interface is SG,
1166                                  * allocate an skb that fits in a single page.
1167                                  */
1168                                 if (!sk_stream_memory_free(sk))
1169                                         goto wait_for_sndbuf;
1170 
1171                                 skb = sk_stream_alloc_skb(sk,
1172                                                           select_size(sk, sg),
1173                                                           sk->sk_allocation);
1174                                 if (!skb)
1175                                         goto wait_for_memory;
1176 
1177                                 /*
1178                                  * All packets are restored as if they have
1179                                  * already been sent.
1180                                  */
1181                                 if (tp->repair)
1182                                         TCP_SKB_CB(skb)->when = tcp_time_stamp;
1183 
1184                                 /*
1185                                  * Check whether we can use HW checksum.
1186                                  */
1187                                 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
1188                                         skb->ip_summed = CHECKSUM_PARTIAL;
1189 
1190                                 skb_entail(sk, skb);
1191                                 copy = size_goal;
1192                                 max = size_goal;
1193                         }
1194 
1195                         /* Try to append data to the end of skb. */
1196                         if (copy > seglen)
1197                                 copy = seglen;
1198 
1199                         /* Where to copy to? */
1200                         if (skb_availroom(skb) > 0) {
1201                                 /* We have some space in skb head. Superb! */
1202                                 copy = min_t(int, copy, skb_availroom(skb));
1203                                 err = skb_add_data_nocache(sk, skb, from, copy);
1204                                 if (err)
1205                                         goto do_fault;
1206                         } else {
1207                                 bool merge = true;
1208                                 int i = skb_shinfo(skb)->nr_frags;
1209                                 struct page_frag *pfrag = sk_page_frag(sk);
1210 
1211                                 if (!sk_page_frag_refill(sk, pfrag))
1212                                         goto wait_for_memory;
1213 
1214                                 if (!skb_can_coalesce(skb, i, pfrag->page,
1215                                                       pfrag->offset)) {
1216                                         if (i == MAX_SKB_FRAGS || !sg) {
1217                                                 tcp_mark_push(tp, skb);
1218                                                 goto new_segment;
1219                                         }
1220                                         merge = false;
1221                                 }
1222 
1223                                 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1224 
1225                                 if (!sk_wmem_schedule(sk, copy))
1226                                         goto wait_for_memory;
1227 
1228                                 err = skb_copy_to_page_nocache(sk, from, skb,
1229                                                                pfrag->page,
1230                                                                pfrag->offset,
1231                                                                copy);
1232                                 if (err)
1233                                         goto do_error;
1234 
1235                                 /* Update the skb. */
1236                                 if (merge) {
1237                                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1238                                 } else {
1239                                         skb_fill_page_desc(skb, i, pfrag->page,
1240                                                            pfrag->offset, copy);
1241                                         get_page(pfrag->page);
1242                                 }
1243                                 pfrag->offset += copy;
1244                         }
1245 
1246                         if (!copied)
1247                                 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1248 
1249                         tp->write_seq += copy;
1250                         TCP_SKB_CB(skb)->end_seq += copy;
1251                         skb_shinfo(skb)->gso_segs = 0;
1252 
1253                         from += copy;
1254                         copied += copy;
1255                         if ((seglen -= copy) == 0 && iovlen == 0)
1256                                 goto out;
1257 
1258                         if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1259                                 continue;
1260 
1261                         if (forced_push(tp)) {
1262                                 tcp_mark_push(tp, skb);
1263                                 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1264                         } else if (skb == tcp_send_head(sk))
1265                                 tcp_push_one(sk, mss_now);
1266                         continue;
1267 
1268 wait_for_sndbuf:
1269                         set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1270 wait_for_memory:
1271                         if (copied)
1272                                 tcp_push(sk, flags & ~MSG_MORE, mss_now,
1273                                          TCP_NAGLE_PUSH, size_goal);
1274 
1275                         if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1276                                 goto do_error;
1277 
1278                         mss_now = tcp_send_mss(sk, &size_goal, flags);
1279                 }
1280         }
1281 
1282 out:
1283         if (copied)
1284                 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1285 out_nopush:
1286         release_sock(sk);
1287         return copied + copied_syn;
1288 
1289 do_fault:
1290         if (!skb->len) {
1291                 tcp_unlink_write_queue(skb, sk);
1292                 /* It is the one place in all of TCP, except connection
1293                  * reset, where we can be unlinking the send_head.
1294                  */
1295                 tcp_check_send_head(sk, skb);
1296                 sk_wmem_free_skb(sk, skb);
1297         }
1298 
1299 do_error:
1300         if (copied + copied_syn)
1301                 goto out;
1302 out_err:
1303         err = sk_stream_error(sk, flags, err);
1304         release_sock(sk);
1305         return err;
1306 }
1307 EXPORT_SYMBOL(tcp_sendmsg);
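/*
 * Usage sketch (illustrative, not part of this file): the push decisions
 * above are what a user-space sender drives through send(2) flags.  A
 * minimal example of batching a header and a body with MSG_MORE so the
 * stack may coalesce them into one segment; "fd", "hdr" and "body" are
 * hypothetical.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int send_request(int fd, const char *hdr, const char *body)
 *	{
 *		// MSG_MORE: more data follows, the push may be deferred.
 *		if (send(fd, hdr, strlen(hdr), MSG_MORE) < 0)
 *			return -1;
 *		// No MSG_MORE: pending data is pushed out to the wire.
 *		if (send(fd, body, strlen(body), 0) < 0)
 *			return -1;
 *		return 0;
 *	}
 */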
1308 
1309 /*
1310  *      Handle reading urgent data. BSD has very simple semantics for
1311  *      this, no blocking and very strange errors 8)
1312  */
1313 
1314 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1315 {
1316         struct tcp_sock *tp = tcp_sk(sk);
1317 
1318         /* No URG data to read. */
1319         if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1320             tp->urg_data == TCP_URG_READ)
1321                 return -EINVAL; /* Yes this is right ! */
1322 
1323         if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1324                 return -ENOTCONN;
1325 
1326         if (tp->urg_data & TCP_URG_VALID) {
1327                 int err = 0;
1328                 char c = tp->urg_data;
1329 
1330                 if (!(flags & MSG_PEEK))
1331                         tp->urg_data = TCP_URG_READ;
1332 
1333                 /* Read urgent data. */
1334                 msg->msg_flags |= MSG_OOB;
1335 
1336                 if (len > 0) {
1337                         if (!(flags & MSG_TRUNC))
1338                                 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1339                         len = 1;
1340                 } else
1341                         msg->msg_flags |= MSG_TRUNC;
1342 
1343                 return err ? -EFAULT : len;
1344         }
1345 
1346         if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1347                 return 0;
1348 
1349         /* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1350          * the available implementations agree in this case:
1351          * this call should never block, independent of the
1352          * blocking state of the socket.
1353          * Mike <pall@rz.uni-karlsruhe.de>
1354          */
1355         return -EAGAIN;
1356 }
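/*
 * Usage sketch (illustrative, not part of this file): seen from user
 * space, the return values above map onto recv(2) with MSG_OOB.  "fd" is
 * a hypothetical connected TCP socket with SO_OOBINLINE left disabled.
 *
 *	#include <errno.h>
 *	#include <sys/socket.h>
 *
 *	static int read_oob_byte(int fd, char *out)
 *	{
 *		ssize_t n = recv(fd, out, 1, MSG_OOB);
 *
 *		if (n == 1)
 *			return 0;	// urgent byte copied to *out
 *		if (n < 0 && errno == EAGAIN)
 *			return 1;	// no urgent data has arrived yet
 *		// EINVAL: SO_OOBINLINE is set or the urgent byte was
 *		// already consumed; 0: connection closed or shut down.
 *		return -1;
 *	}
 */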
1357 
1358 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1359 {
1360         struct sk_buff *skb;
1361         int copied = 0, err = 0;
1362 
1363         /* XXX -- need to support SO_PEEK_OFF */
1364 
1365         skb_queue_walk(&sk->sk_write_queue, skb) {
1366                 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
1367                 if (err)
1368                         break;
1369 
1370                 copied += skb->len;
1371         }
1372 
1373         return err ?: copied;
1374 }
1375 
1376 /* Clean up the receive buffer for full frames taken by the user,
1377  * then send an ACK if necessary.  COPIED is the number of bytes
1378  * tcp_recvmsg has given to the user so far; it speeds up the
1379  * calculation of whether or not we must ACK for the sake of
1380  * a window update.
1381  */
1382 void tcp_cleanup_rbuf(struct sock *sk, int copied)
1383 {
1384         struct tcp_sock *tp = tcp_sk(sk);
1385         bool time_to_ack = false;
1386 
1387         struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1388 
1389         WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1390              "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1391              tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1392 
1393         if (inet_csk_ack_scheduled(sk)) {
1394                 const struct inet_connection_sock *icsk = inet_csk(sk);
1395                    /* Delayed ACKs frequently hit locked sockets during bulk
1396                     * receive. */
1397                 if (icsk->icsk_ack.blocked ||
1398                     /* Once-per-two-segments ACK was not sent by tcp_input.c */
1399                     tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1400                     /*
1401                      * If this read emptied the read buffer, we send an ACK when
1402                      * the connection is not bidirectional, the user has drained
1403                      * the receive buffer and there was a small segment
1404                      * in the queue.
1405                      */
1406                     (copied > 0 &&
1407                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1408                       ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1409                        !icsk->icsk_ack.pingpong)) &&
1410                       !atomic_read(&sk->sk_rmem_alloc)))
1411                         time_to_ack = true;
1412         }
1413 
1414         /* We send an ACK if we can now advertise a non-zero window
1415          * which has been raised "significantly".
1416          *
1417          * Even if the window was raised up to infinity, do not send a window-open
1418          * ACK in states where we will not receive more data. It would be useless.
1419          */
1420         if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1421                 __u32 rcv_window_now = tcp_receive_window(tp);
1422 
1423                 /* Optimize, __tcp_select_window() is not cheap. */
1424                 if (2*rcv_window_now <= tp->window_clamp) {
1425                         __u32 new_window = __tcp_select_window(sk);
1426 
1427                         /* Send ACK now, if this read freed lots of space
1428                          * in our buffer. new_window is the window we could
1429                          * advertise now; do so only if it is not less than the current one.
1430                          * "Lots" means "at least twice" here.
1431                          */
1432                         if (new_window && new_window >= 2 * rcv_window_now)
1433                                 time_to_ack = true;
1434                 }
1435         }
1436         if (time_to_ack)
1437                 tcp_send_ack(sk);
1438 }
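/*
 * Worked example (figures invented for illustration): with window_clamp
 * at 64K and a currently advertised window of 8K, 2 * 8K <= 64K holds, so
 * __tcp_select_window() is consulted; if the read freed enough space that
 * it now returns 32K (>= 2 * 8K), time_to_ack becomes true and a window
 * update is sent.  With an advertised window of 40K the cheap first test
 * already fails and no extra ACK is generated.
 */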
1439 
1440 static void tcp_prequeue_process(struct sock *sk)
1441 {
1442         struct sk_buff *skb;
1443         struct tcp_sock *tp = tcp_sk(sk);
1444 
1445         NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1446 
1447         /* The RX process wants to run with BHs disabled, though it is not
1448          * strictly necessary. */
1449         local_bh_disable();
1450         while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1451                 sk_backlog_rcv(sk, skb);
1452         local_bh_enable();
1453 
1454         /* Clear memory counter. */
1455         tp->ucopy.memory = 0;
1456 }
1457 
1458 #ifdef CONFIG_NET_DMA
1459 static void tcp_service_net_dma(struct sock *sk, bool wait)
1460 {
1461         dma_cookie_t done, used;
1462         dma_cookie_t last_issued;
1463         struct tcp_sock *tp = tcp_sk(sk);
1464 
1465         if (!tp->ucopy.dma_chan)
1466                 return;
1467 
1468         last_issued = tp->ucopy.dma_cookie;
1469         dma_async_issue_pending(tp->ucopy.dma_chan);
1470 
1471         do {
1472                 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1473                                               last_issued, &done,
1474                                               &used) == DMA_COMPLETE) {
1475                         /* Safe to free early-copied skbs now */
1476                         __skb_queue_purge(&sk->sk_async_wait_queue);
1477                         break;
1478                 } else {
1479                         struct sk_buff *skb;
1480                         while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1481                                (dma_async_is_complete(skb->dma_cookie, done,
1482                                                       used) == DMA_COMPLETE)) {
1483                                 __skb_dequeue(&sk->sk_async_wait_queue);
1484                                 kfree_skb(skb);
1485                         }
1486                 }
1487         } while (wait);
1488 }
1489 #endif
1490 
1491 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1492 {
1493         struct sk_buff *skb;
1494         u32 offset;
1495 
1496         while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1497                 offset = seq - TCP_SKB_CB(skb)->seq;
1498                 if (tcp_hdr(skb)->syn)
1499                         offset--;
1500                 if (offset < skb->len || tcp_hdr(skb)->fin) {
1501                         *off = offset;
1502                         return skb;
1503                 }
1504                 /* This looks weird, but this can happen if TCP collapsing
1505                  * split a fat GRO packet while we released the socket lock
1506                  * in skb_splice_bits().
1507                  */
1508                 sk_eat_skb(sk, skb, false);
1509         }
1510         return NULL;
1511 }
1512 
1513 /*
1514  * This routine provides an alternative to tcp_recvmsg() for routines
1515  * that would like to handle copying from skbuffs directly in 'sendfile'
1516  * fashion.
1517  * Note:
1518  *      - It is assumed that the socket was locked by the caller.
1519  *      - The routine does not block.
1520  *      - At present, there is no support for reading OOB data
1521  *        or for 'peeking' the socket using this routine
1522  *        (although both would be easy to implement).
1523  */
1524 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1525                   sk_read_actor_t recv_actor)
1526 {
1527         struct sk_buff *skb;
1528         struct tcp_sock *tp = tcp_sk(sk);
1529         u32 seq = tp->copied_seq;
1530         u32 offset;
1531         int copied = 0;
1532 
1533         if (sk->sk_state == TCP_LISTEN)
1534                 return -ENOTCONN;
1535         while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1536                 if (offset < skb->len) {
1537                         int used;
1538                         size_t len;
1539 
1540                         len = skb->len - offset;
1541                         /* Stop reading if we hit a patch of urgent data */
1542                         if (tp->urg_data) {
1543                                 u32 urg_offset = tp->urg_seq - seq;
1544                                 if (urg_offset < len)
1545                                         len = urg_offset;
1546                                 if (!len)
1547                                         break;
1548                         }
1549                         used = recv_actor(desc, skb, offset, len);
1550                         if (used <= 0) {
1551                                 if (!copied)
1552                                         copied = used;
1553                                 break;
1554                         } else if (used <= len) {
1555                                 seq += used;
1556                                 copied += used;
1557                                 offset += used;
1558                         }
1559                         /* If recv_actor drops the lock (e.g. TCP splice
1560                          * receive) the skb pointer might be invalid when
1561                          * getting here: tcp_collapse might have deleted it
1562                          * while aggregating skbs from the socket queue.
1563                          */
1564                         skb = tcp_recv_skb(sk, seq - 1, &offset);
1565                         if (!skb)
1566                                 break;
1567                         /* TCP coalescing might have appended data to the skb.
1568                          * Try to splice more frags
1569                          */
1570                         if (offset + 1 != skb->len)
1571                                 continue;
1572                 }
1573                 if (tcp_hdr(skb)->fin) {
1574                         sk_eat_skb(sk, skb, false);
1575                         ++seq;
1576                         break;
1577                 }
1578                 sk_eat_skb(sk, skb, false);
1579                 if (!desc->count)
1580                         break;
1581                 tp->copied_seq = seq;
1582         }
1583         tp->copied_seq = seq;
1584 
1585         tcp_rcv_space_adjust(sk);
1586 
1587         /* Clean up data we have read: This will do ACK frames. */
1588         if (copied > 0) {
1589                 tcp_recv_skb(sk, seq, &offset);
1590                 tcp_cleanup_rbuf(sk, copied);
1591         }
1592         return copied;
1593 }
1594 EXPORT_SYMBOL(tcp_read_sock);
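/*
 * Caller sketch (assumptions flagged, not part of this file): a minimal
 * recv_actor that copies data into a kernel buffer carried in
 * desc->arg.data.  "example_actor" and "example_read" are hypothetical;
 * an in-tree user of this interface is tcp_splice_read() via
 * tcp_splice_data_recv().
 *
 *	static int example_actor(read_descriptor_t *desc, struct sk_buff *skb,
 *				 unsigned int offset, size_t len)
 *	{
 *		size_t want = min_t(size_t, len, desc->count);
 *
 *		// skb_copy_bits() handles both the skb head and its frags.
 *		if (skb_copy_bits(skb, offset, desc->arg.data, want))
 *			return 0;	// consume nothing; tcp_read_sock() stops
 *		desc->arg.data += want;
 *		desc->count -= want;
 *		return want;		// bytes consumed from this skb
 *	}
 *
 *	// The caller must hold the socket lock, as noted above.
 *	static int example_read(struct sock *sk, void *buf, size_t size)
 *	{
 *		read_descriptor_t desc = { .arg.data = buf, .count = size };
 *
 *		return tcp_read_sock(sk, &desc, example_actor);
 *	}
 */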
1595 
1596 /*
1597  *      This routine copies from a sock struct into the user buffer.
1598  *
1599  *      Technical note: since 2.3 we work on a _locked_ socket, so that
1600  *      tricks with *seq access order and skb->users are not required.
1601  *      The code could probably be improved even further.
1602  */
1603 
1604 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1605                 size_t len, int nonblock, int flags, int *addr_len)
1606 {
1607         struct tcp_sock *tp = tcp_sk(sk);
1608         int copied = 0;
1609         u32 peek_seq;
1610         u32 *seq;
1611         unsigned long used;
1612         int err;
1613         int target;             /* Read at least this many bytes */
1614         long timeo;
1615         struct task_struct *user_recv = NULL;
1616         bool copied_early = false;
1617         struct sk_buff *skb;
1618         u32 urg_hole = 0;
1619 
1620         if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1621             (sk->sk_state == TCP_ESTABLISHED))
1622                 sk_busy_loop(sk, nonblock);
1623 
1624         lock_sock(sk);
1625 
1626         err = -ENOTCONN;
1627         if (sk->sk_state == TCP_LISTEN)
1628                 goto out;
1629 
1630         timeo = sock_rcvtimeo(sk, nonblock);
1631 
1632         /* Urgent data needs to be handled specially. */
1633         if (flags & MSG_OOB)
1634                 goto recv_urg;
1635 
1636         if (unlikely(tp->repair)) {
1637                 err = -EPERM;
1638                 if (!(flags & MSG_PEEK))
1639                         goto out;
1640 
1641                 if (tp->repair_queue == TCP_SEND_QUEUE)
1642                         goto recv_sndq;
1643 
1644                 err = -EINVAL;
1645                 if (tp->repair_queue == TCP_NO_QUEUE)
1646                         goto out;
1647 
1648                 /* 'common' recv queue MSG_PEEK-ing */
1649         }
1650 
1651         seq = &tp->copied_seq;
1652         if (flags & MSG_PEEK) {
1653                 peek_seq = tp->copied_seq;
1654                 seq = &peek_seq;
1655         }
1656 
1657         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1658 
1659 #ifdef CONFIG_NET_DMA
1660         tp->ucopy.dma_chan = NULL;
1661         preempt_disable();
1662         skb = skb_peek_tail(&sk->sk_receive_queue);
1663         {
1664                 int available = 0;
1665 
1666                 if (skb)
1667                         available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1668                 if ((available < target) &&
1669                     (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1670                     !sysctl_tcp_low_latency &&
1671                     net_dma_find_channel()) {
1672                         preempt_enable();
1673                         tp->ucopy.pinned_list =
1674                                         dma_pin_iovec_pages(msg->msg_iov, len);
1675                 } else {
1676                         preempt_enable();
1677                 }
1678         }
1679 #endif
1680 
1681         do {
1682                 u32 offset;
1683 
1684                 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1685                 if (tp->urg_data && tp->urg_seq == *seq) {
1686                         if (copied)
1687                                 break;
1688                         if (signal_pending(current)) {
1689                                 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1690                                 break;
1691                         }
1692                 }
1693 
1694                 /* Next get a buffer. */
1695 
1696                 skb_queue_walk(&sk->sk_receive_queue, skb) {
1697                         /* Now that we have two receive queues this
1698                          * shouldn't happen.
1699                          */
1700                         if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1701                                  "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1702                                  *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1703                                  flags))
1704                                 break;
1705 
1706                         offset = *seq - TCP_SKB_CB(skb)->seq;
1707                         if (tcp_hdr(skb)->syn)
1708                                 offset--;
1709                         if (offset < skb->len)
1710                                 goto found_ok_skb;
1711                         if (tcp_hdr(skb)->fin)
1712                                 goto found_fin_ok;
1713                         WARN(!(flags & MSG_PEEK),
1714                              "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1715                              *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1716                 }
1717 
1718                 /* Well, if we have backlog, try to process it now. */
1719 
1720                 if (copied >= target && !sk->sk_backlog.tail)
1721                         break;
1722 
1723                 if (copied) {
1724                         if (sk->sk_err ||
1725                             sk->sk_state == TCP_CLOSE ||
1726                             (sk->sk_shutdown & RCV_SHUTDOWN) ||
1727                             !timeo ||
1728                             signal_pending(current))
1729                                 break;
1730                 } else {
1731                         if (sock_flag(sk, SOCK_DONE))
1732                                 break;
1733 
1734                         if (sk->sk_err) {
1735                                 copied = sock_error(sk);
1736                                 break;
1737                         }
1738 
1739                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1740                                 break;
1741 
1742                         if (sk->sk_state == TCP_CLOSE) {
1743                                 if (!sock_flag(sk, SOCK_DONE)) {
1744                                         /* This occurs when the user tries to read
1745                                          * from a never-connected socket.
1746                                          */
1747                                         copied = -ENOTCONN;
1748                                         break;
1749                                 }
1750                                 break;
1751                         }
1752 
1753                         if (!timeo) {
1754                                 copied = -EAGAIN;
1755                                 break;
1756                         }
1757 
1758                         if (signal_pending(current)) {
1759                                 copied = sock_intr_errno(timeo);
1760                                 break;
1761                         }
1762                 }
1763 
1764                 tcp_cleanup_rbuf(sk, copied);
1765 
1766                 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1767                         /* Install new reader */
1768                         if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1769                                 user_recv = current;
1770                                 tp->ucopy.task = user_recv;
1771                                 tp->ucopy.iov = msg->msg_iov;
1772                         }
1773 
1774                         tp->ucopy.len = len;
1775 
1776                         WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1777                                 !(flags & (MSG_PEEK | MSG_TRUNC)));
1778 
1779                         /* Ugly... If the prequeue is not empty, we have to
1780                          * process it before releasing the socket, otherwise
1781                          * ordering will be broken on the second iteration.
1782                          * A more elegant solution is required!!!
1783                          *
1784                          * Look: we have the following (pseudo)queues:
1785                          *
1786                          * 1. packets in flight
1787                          * 2. backlog
1788                          * 3. prequeue
1789                          * 4. receive_queue
1790                          *
1791                          * Each queue can be processed only if the next ones
1792                          * are empty. At this point we have empty receive_queue.
1793                          * But the prequeue _can_ be non-empty after the 2nd iteration,
1794                          * when we jumped to the start of the loop because backlog
1795                          * processing added something to the receive_queue.
1796                          * We cannot release_sock(), because the backlog contains
1797                          * packets that arrived _after_ the prequeued ones.
1798                          *
1799                          * In short, the algorithm is clear --- process all
1800                          * the queues in order. We could do it more directly,
1801                          * requeueing packets from the backlog to the prequeue if
1802                          * it is not empty. That is more elegant, but eats cycles,
1803                          * unfortunately.
1804                          */
1805                         if (!skb_queue_empty(&tp->ucopy.prequeue))
1806                                 goto do_prequeue;
1807 
1808                         /* __ Set realtime policy in scheduler __ */
1809                 }
1810 
1811 #ifdef CONFIG_NET_DMA
1812                 if (tp->ucopy.dma_chan) {
1813                         if (tp->rcv_wnd == 0 &&
1814                             !skb_queue_empty(&sk->sk_async_wait_queue)) {
1815                                 tcp_service_net_dma(sk, true);
1816                                 tcp_cleanup_rbuf(sk, copied);
1817                         } else
1818                                 dma_async_issue_pending(tp->ucopy.dma_chan);
1819                 }
1820 #endif
1821                 if (copied >= target) {
1822                         /* Do not sleep, just process backlog. */
1823                         release_sock(sk);
1824                         lock_sock(sk);
1825                 } else
1826                         sk_wait_data(sk, &timeo);
1827 
1828 #ifdef CONFIG_NET_DMA
1829                 tcp_service_net_dma(sk, false);  /* Don't block */
1830                 tp->ucopy.wakeup = 0;
1831 #endif
1832 
1833                 if (user_recv) {
1834                         int chunk;
1835 
1836                         /* __ Restore normal policy in scheduler __ */
1837 
1838                         if ((chunk = len - tp->ucopy.len) != 0) {
1839                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1840                                 len -= chunk;
1841                                 copied += chunk;
1842                         }
1843 
1844                         if (tp->rcv_nxt == tp->copied_seq &&
1845                             !skb_queue_empty(&tp->ucopy.prequeue)) {
1846 do_prequeue:
1847                                 tcp_prequeue_process(sk);
1848 
1849                                 if ((chunk = len - tp->ucopy.len) != 0) {
1850                                         NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1851                                         len -= chunk;
1852                                         copied += chunk;
1853                                 }
1854                         }
1855                 }
1856                 if ((flags & MSG_PEEK) &&
1857                     (peek_seq - copied - urg_hole != tp->copied_seq)) {
1858                         net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1859                                             current->comm,
1860                                             task_pid_nr(current));
1861                         peek_seq = tp->copied_seq;
1862                 }
1863                 continue;
1864 
1865         found_ok_skb:
1866                 /* Ok so how much can we use? */
1867                 used = skb->len - offset;
1868                 if (len < used)
1869                         used = len;
1870 
1871                 /* Do we have urgent data here? */
1872                 if (tp->urg_data) {
1873                         u32 urg_offset = tp->urg_seq - *seq;
1874                         if (urg_offset < used) {
1875                                 if (!urg_offset) {
1876                                         if (!sock_flag(sk, SOCK_URGINLINE)) {
1877                                                 ++*seq;
1878                                                 urg_hole++;
1879                                                 offset++;
1880                                                 used--;
1881                                                 if (!used)
1882                                                         goto skip_copy;
1883                                         }
1884                                 } else
1885                                         used = urg_offset;
1886                         }
1887                 }
1888 
1889                 if (!(flags & MSG_TRUNC)) {
1890 #ifdef CONFIG_NET_DMA
1891                         if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1892                                 tp->ucopy.dma_chan = net_dma_find_channel();
1893 
1894                         if (tp->ucopy.dma_chan) {
1895                                 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1896                                         tp->ucopy.dma_chan, skb, offset,
1897                                         msg->msg_iov, used,
1898                                         tp->ucopy.pinned_list);
1899 
1900                                 if (tp->ucopy.dma_cookie < 0) {
1901 
1902                                         pr_alert("%s: dma_cookie < 0\n",
1903                                                  __func__);
1904 
1905                                         /* Exception. Bailout! */
1906                                         if (!copied)
1907                                                 copied = -EFAULT;
1908                                         break;
1909                                 }
1910 
1911                                 dma_async_issue_pending(tp->ucopy.dma_chan);
1912 
1913                                 if ((offset + used) == skb->len)
1914                                         copied_early = true;
1915 
1916                         } else
1917 #endif
1918                         {
1919                                 err = skb_copy_datagram_iovec(skb, offset,
1920                                                 msg->msg_iov, used);
1921                                 if (err) {
1922                                         /* Exception. Bailout! */
1923                                         if (!copied)
1924                                                 copied = -EFAULT;
1925                                         break;
1926                                 }
1927                         }
1928                 }
1929 
1930                 *seq += used;
1931                 copied += used;
1932                 len -= used;
1933 
1934                 tcp_rcv_space_adjust(sk);
1935 
1936 skip_copy:
1937                 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1938                         tp->urg_data = 0;
1939                         tcp_fast_path_check(sk);
1940                 }
1941                 if (used + offset < skb->len)
1942                         continue;
1943 
1944                 if (tcp_hdr(skb)->fin)
1945                         goto found_fin_ok;
1946                 if (!(flags & MSG_PEEK)) {
1947                         sk_eat_skb(sk, skb, copied_early);
1948                         copied_early = false;
1949                 }
1950                 continue;
1951 
1952         found_fin_ok:
1953                 /* Process the FIN. */
1954                 ++*seq;
1955                 if (!(flags & MSG_PEEK)) {
1956                         sk_eat_skb(sk, skb, copied_early);
1957                         copied_early = false;
1958                 }
1959                 break;
1960         } while (len > 0);
1961 
1962         if (user_recv) {
1963                 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1964                         int chunk;
1965 
1966                         tp->ucopy.len = copied > 0 ? len : 0;
1967 
1968                         tcp_prequeue_process(sk);
1969 
1970                         if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1971                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1972                                 len -= chunk;
1973                                 copied += chunk;
1974                         }
1975                 }
1976 
1977                 tp->ucopy.task = NULL;
1978                 tp->ucopy.len = 0;
1979         }
1980 
1981 #ifdef CONFIG_NET_DMA
1982         tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
1983         tp->ucopy.dma_chan = NULL;
1984 
1985         if (tp->ucopy.pinned_list) {
1986                 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1987                 tp->ucopy.pinned_list = NULL;
1988         }
1989 #endif
1990 
1991         /* According to UNIX98, msg_name/msg_namelen are ignored
1992          * on connected socket. I was just happy when found this 8) --ANK
1993          */
1994 
1995         /* Clean up data we have read: This will do ACK frames. */
1996         tcp_cleanup_rbuf(sk, copied);
1997 
1998         release_sock(sk);
1999         return copied;
2000 
2001 out:
2002         release_sock(sk);
2003         return err;
2004 
2005 recv_urg:
2006         err = tcp_recv_urg(sk, msg, len, flags);
2007         goto out;
2008 
2009 recv_sndq:
2010         err = tcp_peek_sndq(sk, msg, len);
2011         goto out;
2012 }
2013 EXPORT_SYMBOL(tcp_recvmsg);
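/*
 * Usage sketch (illustrative, not part of this file): the "target" above
 * comes from sock_rcvlowat() and MSG_WAITALL, which user space controls
 * as below.  "fd" is a hypothetical connected, blocking TCP socket.
 *
 *	#include <sys/socket.h>
 *
 *	static ssize_t read_exactly(int fd, void *buf, size_t len)
 *	{
 *		// MSG_WAITALL: do not return a short read unless an error,
 *		// EOF or a signal intervenes.
 *		return recv(fd, buf, len, MSG_WAITALL);
 *	}
 *
 *	static int set_low_watermark(int fd, int bytes)
 *	{
 *		// Without MSG_WAITALL, a blocking recv() waits until at least
 *		// this many bytes are queued (capped to the requested length).
 *		return setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
 *				  &bytes, sizeof(bytes));
 *	}
 */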
2014 
2015 void tcp_set_state(struct sock *sk, int state)
2016 {
2017         int oldstate = sk->sk_state;
2018 
2019         switch (state) {
2020         case TCP_ESTABLISHED:
2021                 if (oldstate != TCP_ESTABLISHED)
2022                         TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2023                 break;
2024 
2025         case TCP_CLOSE:
2026                 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2027                         TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2028 
2029                 sk->sk_prot->unhash(sk);
2030                 if (inet_csk(sk)->icsk_bind_hash &&
2031                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2032                         inet_put_port(sk);
2033                 /* fall through */
2034         default:
2035                 if (oldstate == TCP_ESTABLISHED)
2036                         TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2037         }
2038 
2039         /* Change state AFTER socket is unhashed to avoid closed
2040          * socket sitting in hash tables.
2041          */
2042         sk->sk_state = state;
2043 
2044 #ifdef STATE_TRACE
2045         SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
2046 #endif
2047 }
2048 EXPORT_SYMBOL_GPL(tcp_set_state);
2049 
2050 /*
2051  *      State processing on a close. This implements the state shift for
2052  *      sending our FIN frame. Note that we only send a FIN for some
2053  *      states. A shutdown() may have already sent the FIN, or we may be
2054  *      closed.
2055  */
2056 
2057 static const unsigned char new_state[16] = {
2058   /* current state:        new state:      action:      */
2059   /* (Invalid)          */ TCP_CLOSE,
2060   /* TCP_ESTABLISHED    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2061   /* TCP_SYN_SENT       */ TCP_CLOSE,
2062   /* TCP_SYN_RECV       */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2063   /* TCP_FIN_WAIT1      */ TCP_FIN_WAIT1,
2064   /* TCP_FIN_WAIT2      */ TCP_FIN_WAIT2,
2065   /* TCP_TIME_WAIT      */ TCP_CLOSE,
2066   /* TCP_CLOSE          */ TCP_CLOSE,
2067   /* TCP_CLOSE_WAIT     */ TCP_LAST_ACK  | TCP_ACTION_FIN,
2068   /* TCP_LAST_ACK       */ TCP_LAST_ACK,
2069   /* TCP_LISTEN         */ TCP_CLOSE,
2070   /* TCP_CLOSING        */ TCP_CLOSING,
2071 };
2072 
2073 static int tcp_close_state(struct sock *sk)
2074 {
2075         int next = (int)new_state[sk->sk_state];
2076         int ns = next & TCP_STATE_MASK;
2077 
2078         tcp_set_state(sk, ns);
2079 
2080         return next & TCP_ACTION_FIN;
2081 }
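/*
 * Worked example (illustrative): closing an ESTABLISHED socket looks up
 * new_state[TCP_ESTABLISHED] == TCP_FIN_WAIT1 | TCP_ACTION_FIN, so
 * tcp_close_state() moves the socket to FIN-WAIT-1 and returns a non-zero
 * action, telling the caller to transmit a FIN.  From TCP_SYN_SENT the
 * entry is plain TCP_CLOSE: the state changes but no FIN is sent.
 */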
2082 
2083 /*
2084  *      Shutdown the sending side of a connection. Much like close except
2085  *      that we don't receive shut down or sock_set_flag(sk, SOCK_DEAD).
2086  */
2087 
2088 void tcp_shutdown(struct sock *sk, int how)
2089 {
2090         /*      We need to grab some memory, and put together a FIN,
2091          *      and then put it into the queue to be sent.
2092          *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2093          */
2094         if (!(how & SEND_SHUTDOWN))
2095                 return;
2096 
2097         /* If we've already sent a FIN, or it's a closed state, skip this. */
2098         if ((1 << sk->sk_state) &
2099             (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2100              TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2101                 /* Clear out any half completed packets.  FIN if needed. */
2102                 if (tcp_close_state(sk))
2103                         tcp_send_fin(sk);
2104         }
2105 }
2106 EXPORT_SYMBOL(tcp_shutdown);
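/*
 * Usage sketch (illustrative, not part of this file): only SEND_SHUTDOWN
 * reaches the FIN logic above, which is what shutdown(2) with SHUT_WR
 * requests.  A half-close example; "fd" is hypothetical.
 *
 *	#include <sys/socket.h>
 *
 *	static int half_close(int fd)
 *	{
 *		// Send our FIN but keep receiving until the peer closes.
 *		return shutdown(fd, SHUT_WR);
 *	}
 */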
2107 
2108 bool tcp_check_oom(struct sock *sk, int shift)
2109 {
2110         bool too_many_orphans, out_of_socket_memory;
2111 
2112         too_many_orphans = tcp_too_many_orphans(sk, shift);
2113         out_of_socket_memory = tcp_out_of_memory(sk);
2114 
2115         if (too_many_orphans)
2116                 net_info_ratelimited("too many orphaned sockets\n");
2117         if (out_of_socket_memory)
2118                 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2119         return too_many_orphans || out_of_socket_memory;
2120 }
2121 
2122 void tcp_close(struct sock *sk, long timeout)
2123 {
2124         struct sk_buff *skb;
2125         int data_was_unread = 0;
2126         int state;
2127 
2128         lock_sock(sk);
2129         sk->sk_shutdown = SHUTDOWN_MASK;
2130 
2131         if (sk->sk_state == TCP_LISTEN) {
2132                 tcp_set_state(sk, TCP_CLOSE);
2133 
2134                 /* Special case. */
2135                 inet_csk_listen_stop(sk);
2136 
2137                 goto adjudge_to_death;
2138         }
2139 
2140         /*  We need to flush the recv. buffs.  We do this only on the
2141          *  descriptor close, not protocol-sourced closes, because the
2142          *  reader process may not have drained the data yet!
2143          */
2144         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2145                 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
2146                           tcp_hdr(skb)->fin;
2147                 data_was_unread += len;
2148                 __kfree_skb(skb);
2149         }
2150 
2151         sk_mem_reclaim(sk);
2152 
2153         /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2154         if (sk->sk_state == TCP_CLOSE)
2155                 goto adjudge_to_death;
2156 
2157         /* As outlined in RFC 2525, section 2.17, we send a RST here because
2158          * data was lost. To witness the awful effects of the old behavior of
2159          * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2160          * GET in an FTP client, suspend the process, wait for the client to
2161          * advertise a zero window, then kill -9 the FTP client, wheee...
2162          * Note: timeout is always zero in such a case.
2163          */
2164         if (unlikely(tcp_sk(sk)->repair)) {
2165                 sk->sk_prot->disconnect(sk, 0);
2166         } else if (data_was_unread) {
2167                 /* Unread data was tossed, zap the connection. */
2168                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2169                 tcp_set_state(sk, TCP_CLOSE);
2170                 tcp_send_active_reset(sk, sk->sk_allocation);
2171         } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2172                 /* Check zero linger _after_ checking for unread data. */
2173                 sk->sk_prot->disconnect(sk, 0);
2174                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2175         } else if (tcp_close_state(sk)) {
2176                 /* We FIN if the application ate all the data before
2177                  * zapping the connection.
2178                  */
2179 
2180                 /* RED-PEN. Formally speaking, we have broken the TCP state
2181                  * machine. State transitions:
2182                  *
2183                  * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2184                  * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2185                  * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2186                  *
2187                  * are legal only when FIN has been sent (i.e. in window),
2188                  * rather than queued out of window. Purists blame.
2189                  *
2190                  * F.e. "RFC state" is ESTABLISHED,
2191                  * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2192                  *
2193                  * The visible deviations are that we sometimes
2194                  * enter the time-wait state when it is not really required
2195                  * (harmless), and do not send active resets when they are
2196                  * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2197                  * they look like CLOSING or LAST_ACK to Linux).
2198                  * Probably, I missed some more holelets.
2199                  *                                              --ANK
2200                  * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2201                  * in a single packet! (May consider it later but will
2202                  * probably need API support or TCP_CORK SYN-ACK until
2203                  * data is written and socket is closed.)
2204                  */
2205                 tcp_send_fin(sk);
2206         }
2207 
2208         sk_stream_wait_close(sk, timeout);
2209 
2210 adjudge_to_death:
2211         state = sk->sk_state;
2212         sock_hold(sk);
2213         sock_orphan(sk);
2214 
2215         /* It is the last release_sock in its life. It will remove backlog. */
2216         release_sock(sk);
2217 
2218 
2219         /* Now socket is owned by kernel and we acquire BH lock
2220            to finish close. No need to check for user refs.
2221          */
2222         local_bh_disable();
2223         bh_lock_sock(sk);
2224         WARN_ON(sock_owned_by_user(sk));
2225 
2226         percpu_counter_inc(sk->sk_prot->orphan_count);
2227 
2228         /* Have we already been destroyed by a softirq or backlog? */
2229         if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2230                 goto out;
2231 
2232         /*      This is a (useful) BSD violation of the RFC. There is a
2233          *      problem with TCP as specified in that the other end could
2234          *      keep a socket open forever with no application left at this end.
2235          *      We use a 1 minute timeout (about the same as BSD) then kill
2236          *      our end. If they send after that then tough - BUT: it is long enough
2237          *      that we won't repeat the old "4*rto = almost no time - whoops,
2238          *      reset" mistake.
2239          *
2240          *      Nope, it was not a mistake. It is really the desired behaviour,
2241          *      e.g. on http servers, where such sockets are useless but
2242          *      consume significant resources. Let's do it with the special
2243          *      linger2 option.                                 --ANK
2244          */
2245 
2246         if (sk->sk_state == TCP_FIN_WAIT2) {
2247                 struct tcp_sock *tp = tcp_sk(sk);
2248                 if (tp->linger2 < 0) {
2249                         tcp_set_state(sk, TCP_CLOSE);
2250                         tcp_send_active_reset(sk, GFP_ATOMIC);
2251                         NET_INC_STATS_BH(sock_net(sk),
2252                                         LINUX_MIB_TCPABORTONLINGER);
2253                 } else {
2254                         const int tmo = tcp_fin_time(sk);
2255 
2256                         if (tmo > TCP_TIMEWAIT_LEN) {
2257                                 inet_csk_reset_keepalive_timer(sk,
2258                                                 tmo - TCP_TIMEWAIT_LEN);
2259                         } else {
2260                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2261                                 goto out;
2262                         }
2263                 }
2264         }
2265         if (sk->sk_state != TCP_CLOSE) {
2266                 sk_mem_reclaim(sk);
2267                 if (tcp_check_oom(sk, 0)) {
2268                         tcp_set_state(sk, TCP_CLOSE);
2269                         tcp_send_active_reset(sk, GFP_ATOMIC);
2270                         NET_INC_STATS_BH(sock_net(sk),
2271                                         LINUX_MIB_TCPABORTONMEMORY);
2272                 }
2273         }
2274 
2275         if (sk->sk_state == TCP_CLOSE) {
2276                 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2277                 /* We could get here with a non-NULL req if the socket is
2278                  * aborted (e.g., closed with unread data) before 3WHS
2279                  * finishes.
2280                  */
2281                 if (req != NULL)
2282                         reqsk_fastopen_remove(sk, req, false);
2283                 inet_csk_destroy_sock(sk);
2284         }
2285         /* Otherwise, socket is reprieved until protocol close. */
2286 
2287 out:
2288         bh_unlock_sock(sk);
2289         local_bh_enable();
2290         sock_put(sk);
2291 }
2292 EXPORT_SYMBOL(tcp_close);
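/*
 * Usage sketch (illustrative, not part of this file): the zero-linger
 * branch above is what user space selects with SO_LINGER and
 * l_linger == 0; close(2) then aborts the connection with a RST instead
 * of the normal FIN handshake.  "fd" is hypothetical.
 *
 *	#include <sys/socket.h>
 *	#include <unistd.h>
 *
 *	static void abortive_close(int fd)
 *	{
 *		struct linger lg = { .l_onoff = 1, .l_linger = 0 };
 *
 *		setsockopt(fd, SOL_SOCKET, SO_LINGER, &lg, sizeof(lg));
 *		close(fd);
 *	}
 */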
2293 
2294 /* These states need RST on ABORT according to RFC793 */
2295 
2296 static inline bool tcp_need_reset(int state)
2297 {
2298         return (1 << state) &
2299                (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2300                 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2301 }
2302 
2303 int tcp_disconnect(struct sock *sk, int flags)
2304 {
2305         struct inet_sock *inet = inet_sk(sk);
2306         struct inet_connection_sock *icsk = inet_csk(sk);
2307         struct tcp_sock *tp = tcp_sk(sk);
2308         int err = 0;
2309         int old_state = sk->sk_state;
2310 
2311         if (old_state != TCP_CLOSE)
2312                 tcp_set_state(sk, TCP_CLOSE);
2313 
2314         /* ABORT function of RFC793 */
2315         if (old_state == TCP_LISTEN) {
2316                 inet_csk_listen_stop(sk);
2317         } else if (unlikely(tp->repair)) {
2318                 sk->sk_err = ECONNABORTED;
2319         } else if (tcp_need_reset(old_state) ||
2320                    (tp->snd_nxt != tp->write_seq &&
2321                     (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2322                 /* The last check adjusts for the discrepancy between Linux and
2323                  * RFC states.
2324                  */
2325                 tcp_send_active_reset(sk, gfp_any());
2326                 sk->sk_err = ECONNRESET;
2327         } else if (old_state == TCP_SYN_SENT)
2328                 sk->sk_err = ECONNRESET;
2329 
2330         tcp_clear_xmit_timers(sk);
2331         __skb_queue_purge(&sk->sk_receive_queue);
2332         tcp_write_queue_purge(sk);
2333         __skb_queue_purge(&tp->out_of_order_queue);
2334 #ifdef CONFIG_NET_DMA
2335         __skb_queue_purge(&sk->sk_async_wait_queue);
2336 #endif
2337 
2338         inet->inet_dport = 0;
2339 
2340         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2341                 inet_reset_saddr(sk);
2342 
2343         sk->sk_shutdown = 0;
2344         sock_reset_flag(sk, SOCK_DONE);
2345         tp->srtt_us = 0;
2346         if ((tp->write_seq += tp->max_window + 2) == 0)
2347                 tp->write_seq = 1;
2348         icsk->icsk_backoff = 0;
2349         tp->snd_cwnd = 2;
2350         icsk->icsk_probes_out = 0;
2351         tp->packets_out = 0;
2352         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2353         tp->snd_cwnd_cnt = 0;
2354         tp->window_clamp = 0;
2355         tcp_set_ca_state(sk, TCP_CA_Open);
2356         tcp_clear_retrans(tp);
2357         inet_csk_delack_init(sk);
2358         tcp_init_send_head(sk);
2359         memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2360         __sk_dst_reset(sk);
2361 
2362         WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2363 
2364         sk->sk_error_report(sk);
2365         return err;
2366 }
2367 EXPORT_SYMBOL(tcp_disconnect);
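/*
 * Usage sketch (illustrative, not part of this file): user space reaches
 * this routine by calling connect(2) with an address whose family is
 * AF_UNSPEC, which __inet_stream_connect() turns into a call to
 * sk->sk_prot->disconnect().  "fd" is hypothetical.
 *
 *	#include <string.h>
 *	#include <sys/socket.h>
 *
 *	static int drop_connection(int fd)
 *	{
 *		struct sockaddr sa;
 *
 *		memset(&sa, 0, sizeof(sa));
 *		sa.sa_family = AF_UNSPEC;
 *		return connect(fd, &sa, sizeof(sa));
 *	}
 */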
2368 
2369 void tcp_sock_destruct(struct sock *sk)
2370 {
2371         inet_sock_destruct(sk);
2372 
2373         kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
2374 }
2375 
2376 static inline bool tcp_can_repair_sock(const struct sock *sk)
2377 {
2378         return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2379                 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2380 }
2381 
2382 static int tcp_repair_options_est(struct tcp_sock *tp,
2383                 struct tcp_repair_opt __user *optbuf, unsigned int len)
2384 {
2385         struct tcp_repair_opt opt;
2386 
2387         while (len >= sizeof(opt)) {
2388                 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2389                         return -EFAULT;
2390 
2391                 optbuf++;
2392                 len -= sizeof(opt);
2393 
2394                 switch (opt.opt_code) {
2395                 case TCPOPT_MSS:
2396                         tp->rx_opt.mss_clamp = opt.opt_val;
2397                         break;
2398                 case TCPOPT_WINDOW:
2399                         {
2400                                 u16 snd_wscale = opt.opt_val & 0xFFFF;
2401                                 u16 rcv_wscale = opt.opt_val >> 16;
2402 
2403                                 if (snd_wscale > 14 || rcv_wscale > 14)
2404                                         return -EFBIG;
2405 
2406                                 tp->rx_opt.snd_wscale = snd_wscale;
2407                                 tp->rx_opt.rcv_wscale = rcv_wscale;
2408                                 tp->rx_opt.wscale_ok = 1;
2409                         }
2410                         break;
2411                 case TCPOPT_SACK_PERM:
2412                         if (opt.opt_val != 0)
2413                                 return -EINVAL;
2414 
2415                         tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2416                         if (sysctl_tcp_fack)
2417                                 tcp_enable_fack(tp);
2418                         break;
2419                 case TCPOPT_TIMESTAMP:
2420                         if (opt.opt_val != 0)
2421                                 return -EINVAL;
2422 
2423                         tp->rx_opt.tstamp_ok = 1;
2424                         break;
2425                 }
2426         }
2427 
2428         return 0;
2429 }
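/*
 * Usage sketch (assumptions flagged, not part of this file): the records
 * parsed above arrive from user space via setsockopt(TCP_REPAIR_OPTIONS)
 * while the socket is in repair mode and ESTABLISHED, e.g. from a
 * checkpoint/restore tool.  "restore_tcp_options" is hypothetical; the
 * numeric option kinds 2 and 3 are the same values as TCPOPT_MSS and
 * TCPOPT_WINDOW used above.
 *
 *	#include <netinet/in.h>
 *	#include <sys/socket.h>
 *	#include <linux/tcp.h>
 *
 *	static int restore_tcp_options(int fd, unsigned int mss,
 *				       unsigned int snd_wscale,
 *				       unsigned int rcv_wscale)
 *	{
 *		struct tcp_repair_opt opts[2] = {
 *			{ .opt_code = 2, .opt_val = mss },
 *			{ .opt_code = 3,
 *			  .opt_val = snd_wscale | (rcv_wscale << 16) },
 *		};
 *
 *		return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
 *				  opts, sizeof(opts));
 *	}
 */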
2430 
2431 /*
2432  *      Socket option code for TCP.
2433  */
2434 static int do_tcp_setsockopt(struct sock *sk, int level,
2435                 int optname, char __user *optval, unsigned int optlen)
2436 {
2437         struct tcp_sock *tp = tcp_sk(sk);
2438         struct inet_connection_sock *icsk = inet_csk(sk);
2439         int val;
2440         int err = 0;
2441 
2442         /* These are data/string values, all the others are ints */
2443         switch (optname) {
2444         case TCP_CONGESTION: {
2445                 char name[TCP_CA_NAME_MAX];
2446 
2447                 if (optlen < 1)
2448                         return -EINVAL;
2449 
2450                 val = strncpy_from_user(name, optval,
2451                                         min_t(long, TCP_CA_NAME_MAX-1, optlen));
2452                 if (val < 0)
2453                         return -EFAULT;
2454                 name[val] = 0;
2455 
2456                 lock_sock(sk);
2457                 err = tcp_set_congestion_control(sk, name);
2458                 release_sock(sk);
2459                 return err;
2460         }
2461         default:
2462                 /* fallthru */
2463                 break;
2464         }
2465 
2466         if (optlen < sizeof(int))
2467                 return -EINVAL;
2468 
2469         if (get_user(val, (int __user *)optval))
2470                 return -EFAULT;
2471 
2472         lock_sock(sk);
2473 
2474         switch (optname) {
2475         case TCP_MAXSEG:
2476                 /* Values greater than the interface MTU won't take effect. However,
2477                  * at the point when this call is made we typically don't yet
2478                  * know which interface is going to be used. */
2479                 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2480                         err = -EINVAL;
2481                         break;
2482                 }
2483                 tp->rx_opt.user_mss = val;
2484                 break;
2485 
2486         case TCP_NODELAY:
2487                 if (val) {
2488                         /* TCP_NODELAY is weaker than TCP_CORK, so that
2489                          * this option on corked socket is remembered, but
2490                          * it is not activated until cork is cleared.
2491                          *
2492                          * However, when TCP_NODELAY is set we make
2493                          * an explicit push, which overrides even TCP_CORK
2494                          * for currently queued segments.
2495                          */
2496                         tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2497                         tcp_push_pending_frames(sk);
2498                 } else {
2499                         tp->nonagle &= ~TCP_NAGLE_OFF;
2500                 }
2501                 break;
2502 
2503         case TCP_THIN_LINEAR_TIMEOUTS:
2504                 if (val < 0 || val > 1)
2505                         err = -EINVAL;
2506                 else
2507                         tp->thin_lto = val;
2508                 break;
2509 
2510         case TCP_THIN_DUPACK:
2511                 if (val < 0 || val > 1)
2512                         err = -EINVAL;
2513                 else {
2514                         tp->thin_dupack = val;
2515                         if (tp->thin_dupack)
2516                                 tcp_disable_early_retrans(tp);
2517                 }
2518                 break;
2519 
2520         case TCP_REPAIR:
2521                 if (!tcp_can_repair_sock(sk))
2522                         err = -EPERM;
2523                 else if (val == 1) {
2524                         tp->repair = 1;
2525                         sk->sk_reuse = SK_FORCE_REUSE;
2526                         tp->repair_queue = TCP_NO_QUEUE;
2527                 } else if (val == 0) {
2528                         tp->repair = 0;
2529                         sk->sk_reuse = SK_NO_REUSE;
2530                         tcp_send_window_probe(sk);
2531                 } else
2532                         err = -EINVAL;
2533 
2534                 break;
2535 
2536         case TCP_REPAIR_QUEUE:
2537                 if (!tp->repair)
2538                         err = -EPERM;
2539                 else if (val < TCP_QUEUES_NR)
2540                         tp->repair_queue = val;
2541                 else
2542                         err = -EINVAL;
2543                 break;
2544 
2545         case TCP_QUEUE_SEQ:
2546                 if (sk->sk_state != TCP_CLOSE)
2547                         err = -EPERM;
2548                 else if (tp->repair_queue == TCP_SEND_QUEUE)
2549                         tp->write_seq = val;
2550                 else if (tp->repair_queue == TCP_RECV_QUEUE)
2551                         tp->rcv_nxt = val;
2552                 else
2553                         err = -EINVAL;
2554                 break;
2555 
2556         case TCP_REPAIR_OPTIONS:
2557                 if (!tp->repair)
2558                         err = -EINVAL;
2559                 else if (sk->sk_state == TCP_ESTABLISHED)
2560                         err = tcp_repair_options_est(tp,
2561                                         (struct tcp_repair_opt __user *)optval,
2562                                         optlen);
2563                 else
2564                         err = -EPERM;
2565                 break;
2566 
2567         case TCP_CORK:
2568                 /* When set, this indicates that non-full frames are always queued.
2569                  * Later the user clears this option and we transmit
2570                  * any pending partial frames in the queue.  This is
2571                  * meant to be used alongside sendfile() to get properly
2572                  * filled frames when the user (for example) must write
2573                  * out headers with a write() call first and then use
2574                  * sendfile to send out the data parts.
2575                  *
2576                  * TCP_CORK can be set together with TCP_NODELAY and it is
2577                  * stronger than TCP_NODELAY.
2578                  */
2579                 if (val) {
2580                         tp->nonagle |= TCP_NAGLE_CORK;
2581                 } else {
2582                         tp->nonagle &= ~TCP_NAGLE_CORK;
2583                         if (tp->nonagle&TCP_NAGLE_OFF)
2584                                 tp->nonagle |= TCP_NAGLE_PUSH;
2585                         tcp_push_pending_frames(sk);
2586                 }
2587                 break;
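/*
 * Usage sketch (illustrative, not part of this file): the cork/uncork
 * cycle described above, driven from user space around sendfile(2).
 * "fd" and "file_fd" are hypothetical descriptors.
 *
 *	#include <netinet/in.h>
 *	#include <netinet/tcp.h>
 *	#include <sys/sendfile.h>
 *	#include <sys/socket.h>
 *
 *	static void send_header_and_file(int fd, int file_fd,
 *					 const void *hdr, size_t hdr_len,
 *					 size_t file_len)
 *	{
 *		int on = 1, off = 0;
 *
 *		setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *		send(fd, hdr, hdr_len, 0);
 *		sendfile(fd, file_fd, NULL, file_len);
 *		// Clearing TCP_CORK pushes any partially filled frame.
 *		setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *	}
 */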
2588 
2589         case TCP_KEEPIDLE:
2590                 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2591                         err = -EINVAL;
2592                 else {
2593                         tp->keepalive_time = val * HZ;
2594                         if (sock_flag(sk, SOCK_KEEPOPEN) &&
2595                             !((1 << sk->sk_state) &
2596                               (TCPF_CLOSE | TCPF_LISTEN))) {
2597                                 u32 elapsed = keepalive_time_elapsed(tp);
2598                                 if (tp->keepalive_time > elapsed)
2599                                         elapsed = tp->keepalive_time - elapsed;
2600                                 else
2601                                         elapsed = 0;
2602                                 inet_csk_reset_keepalive_timer(sk, elapsed);
2603                         }
2604                 }
2605                 break;
2606         case TCP_KEEPINTVL:
2607                 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2608                         err = -EINVAL;
2609                 else
2610                         tp->keepalive_intvl = val * HZ;
2611                 break;
2612         case TCP_KEEPCNT:
2613                 if (val < 1 || val > MAX_TCP_KEEPCNT)
2614                         err = -EINVAL;
2615                 else
2616                         tp->keepalive_probes = val;
2617                 break;
2618         case TCP_SYNCNT:
2619                 if (val < 1 || val > MAX_TCP_SYNCNT)
2620                         err = -EINVAL;
2621                 else
2622                         icsk->icsk_syn_retries = val;
2623                 break;
2624 
2625         case TCP_LINGER2:
2626                 if (val < 0)
2627                         tp->linger2 = -1;
2628                 else if (val > sysctl_tcp_fin_timeout / HZ)
2629                         tp->linger2 = 0;
2630                 else
2631                         tp->linger2 = val * HZ;
2632                 break;
2633 
2634         case TCP_DEFER_ACCEPT:
2635                 /* Translate value in seconds to number of retransmits */
2636                 icsk->icsk_accept_queue.rskq_defer_accept =
2637                         secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2638                                         TCP_RTO_MAX / HZ);
2639                 break;
2640 
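A hedged userspace sketch of the option handled above: the value is given in seconds and, as the comment notes, converted into a retransmission count by secs_to_retrans(); the listening-socket descriptor and the 5-second figure are illustrative only.

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    /* Illustrative only: lsock is an already-listening TCP socket.  Ask the
     * kernel to hold back accept() wakeups for up to ~5 seconds while it
     * waits for the first data segment from the peer.
     */
    static void enable_defer_accept(int lsock)
    {
            int secs = 5;

            setsockopt(lsock, IPPROTO_TCP, TCP_DEFER_ACCEPT, &secs, sizeof(secs));
    }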
2641         case TCP_WINDOW_CLAMP:
2642                 if (!val) {
2643                         if (sk->sk_state != TCP_CLOSE) {
2644                                 err = -EINVAL;
2645                                 break;
2646                         }
2647                         tp->window_clamp = 0;
2648                 } else
2649                         tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2650                                                 SOCK_MIN_RCVBUF / 2 : val;
2651                 break;
2652 
2653         case TCP_QUICKACK:
2654                 if (!val) {
2655                         icsk->icsk_ack.pingpong = 1;
2656                 } else {
2657                         icsk->icsk_ack.pingpong = 0;
2658                         if ((1 << sk->sk_state) &
2659                             (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2660                             inet_csk_ack_scheduled(sk)) {
2661                                 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2662                                 tcp_cleanup_rbuf(sk, 1);
2663                                 if (!(val & 1))
2664                                         icsk->icsk_ack.pingpong = 1;
2665                         }
2666                 }
2667                 break;
2668 
2669 #ifdef CONFIG_TCP_MD5SIG
2670         case TCP_MD5SIG:
2671                 /* Read the IP->Key mappings from userspace */
2672                 err = tp->af_specific->md5_parse(sk, optval, optlen);
2673                 break;
2674 #endif
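For the userspace side of the IP->key mapping parsed above, a hedged sketch follows; the struct tcp_md5sig layout comes from the UAPI header (linux/tcp.h), the peer address, key and helper name are hypothetical, and header choices may need adjusting per libc.

    #include <string.h>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <linux/tcp.h>          /* struct tcp_md5sig, TCP_MD5SIG */

    /* Illustrative only: install an MD5 signature key for one IPv4 peer on
     * socket sk_fd; the kernel side lands in tp->af_specific->md5_parse().
     */
    static int add_md5_key(int sk_fd, const struct sockaddr_in *peer,
                           const void *key, int keylen)
    {
            struct tcp_md5sig md5;

            memset(&md5, 0, sizeof(md5));
            memcpy(&md5.tcpm_addr, peer, sizeof(*peer)); /* address the key maps to */
            md5.tcpm_keylen = keylen;                    /* <= TCP_MD5SIG_MAXKEYLEN */
            memcpy(md5.tcpm_key, key, keylen);

            return setsockopt(sk_fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
    }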
2675         case TCP_USER_TIMEOUT:
2676                 /* Cap the maximum time in ms that TCP will retry or retransmit
2677                  * before giving up and aborting the connection (ETIMEDOUT).
2678                  */
2679                 if (val < 0)
2680                         err = -EINVAL;
2681                 else
2682                         icsk->icsk_user_timeout = msecs_to_jiffies(val);
2683                 break;
2684 
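A hedged userspace sketch of the option handled above; the 30-second value and socket descriptor are illustrative, and the fallback #define covers libcs whose <netinet/tcp.h> predates TCP_USER_TIMEOUT.

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifndef TCP_USER_TIMEOUT
    #define TCP_USER_TIMEOUT 18     /* fallback for older libc headers */
    #endif

    /* Illustrative only: abort the connection (ETIMEDOUT) if transmitted
     * data stays unacknowledged for more than ~30 seconds.
     */
    static void set_user_timeout(int sock)
    {
            unsigned int timeout_ms = 30 * 1000;    /* value is in milliseconds */

            setsockopt(sock, IPPROTO_TCP, TCP_USER_TIMEOUT,
                       &timeout_ms, sizeof(timeout_ms));
    }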
2685         case TCP_FASTOPEN:
2686                 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2687                     TCPF_LISTEN)))
2688                         err = fastopen_init_queue(sk, val);
2689                 else
2690                         err = -EINVAL;
2691                 break;
2692         case TCP_TIMESTAMP:
2693                 if (!tp->repair)
2694                         err = -EPERM;
2695                 else
2696                         tp->tsoffset = val - tcp_time_stamp;
2697                 break;
2698         case TCP_NOTSENT_LOWAT:
2699                 tp->notsent_lowat = val;
2700                 sk->sk_write_space(sk);
2701                 break;
2702         default:
2703                 err = -ENOPROTOOPT;
2704                 break;
2705         }
2706 
2707         release_sock(sk);
2708         return err;
2709 }
2710 
2711 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2712                    unsigned int optlen)
2713 {
2714         const struct inet_connection_sock *icsk = inet_csk(sk);
2715 
2716         if (level != SOL_TCP)
2717                 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2718                                                      optval, optlen);
2719         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2720 }
2721 EXPORT_SYMBOL(tcp_setsockopt);
2722 
2723 #ifdef CONFIG_COMPAT
2724 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2725                           char __user *optval, unsigned int optlen)
2726 {
2727         if (level != SOL_TCP)
2728                 return inet_csk_compat_setsockopt(sk, level, optname,
2729                                                   optval, optlen);
2730         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2731 }
2732 EXPORT_SYMBOL(compat_tcp_setsockopt);
2733 #endif
2734 
2735 /* Return information about the state of the TCP endpoint in API format. */
2736 void tcp_get_info(const struct sock *sk, struct tcp_info *info)
2737 {
2738         const struct tcp_sock *tp = tcp_sk(sk);
2739         const struct inet_connection_sock *icsk = inet_csk(sk);
2740         u32 now = tcp_time_stamp;
2741 
2742         memset(info, 0, sizeof(*info));
2743 
2744         info->tcpi_state = sk->sk_state;
2745         info->tcpi_ca_state = icsk->icsk_ca_state;
2746         info->tcpi_retransmits = icsk->icsk_retransmits;
2747         info->tcpi_probes = icsk->icsk_probes_out;
2748         info->tcpi_backoff = icsk->icsk_backoff;
2749 
2750         if (tp->rx_opt.tstamp_ok)
2751                 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2752         if (tcp_is_sack(tp))
2753                 info->tcpi_options |= TCPI_OPT_SACK;
2754         if (tp->rx_opt.wscale_ok) {
2755                 info->tcpi_options |= TCPI_OPT_WSCALE;
2756                 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2757                 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2758         }
2759 
2760         if (tp->ecn_flags & TCP_ECN_OK)
2761                 info->tcpi_options |= TCPI_OPT_ECN;
2762         if (tp->ecn_flags & TCP_ECN_SEEN)
2763                 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2764         if (tp->syn_data_acked)
2765                 info->tcpi_options |= TCPI_OPT_SYN_DATA;
2766 
2767         info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2768         info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2769         info->tcpi_snd_mss = tp->mss_cache;
2770         info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2771 
2772         if (sk->sk_state == TCP_LISTEN) {
2773                 info->tcpi_unacked = sk->sk_ack_backlog;
2774                 info->tcpi_sacked = sk->sk_max_ack_backlog;
2775         } else {
2776                 info->tcpi_unacked = tp->packets_out;
2777                 info->tcpi_sacked = tp->sacked_out;
2778         }
2779         info->tcpi_lost = tp->lost_out;
2780         info->tcpi_retrans = tp->retrans_out;
2781         info->tcpi_fackets = tp->fackets_out;
2782 
2783         info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2784         info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2785         info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2786 
2787         info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2788         info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2789         info->tcpi_rtt = tp->srtt_us >> 3;
2790         info->tcpi_rttvar = tp->mdev_us >> 2;
2791         info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2792         info->tcpi_snd_cwnd = tp->snd_cwnd;
2793         info->tcpi_advmss = tp->advmss;
2794         info->tcpi_reordering = tp->reordering;
2795 
2796         info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2797         info->tcpi_rcv_space = tp->rcvq_space.space;
2798 
2799         info->tcpi_total_retrans = tp->total_retrans;
2800 
2801         info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
2802                                         sk->sk_pacing_rate : ~0ULL;
2803         info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
2804                                         sk->sk_max_pacing_rate : ~0ULL;
2805 }
2806 EXPORT_SYMBOL_GPL(tcp_get_info);
2807 
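tcp_get_info() above is what backs the TCP_INFO getsockopt handled below; a hedged userspace sketch (the socket descriptor and the chosen fields are illustrative, and field availability depends on the libc headers in use):

    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>        /* TCP_INFO, struct tcp_info */
    #include <sys/socket.h>

    /* Illustrative only: sock is a connected TCP socket. */
    static void dump_tcp_info(int sock)
    {
            struct tcp_info ti;
            socklen_t len = sizeof(ti);

            if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &ti, &len) == 0)
                    printf("rtt=%uus rttvar=%uus snd_cwnd=%u total_retrans=%u\n",
                           ti.tcpi_rtt, ti.tcpi_rttvar,
                           ti.tcpi_snd_cwnd, ti.tcpi_total_retrans);
    }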
2808 static int do_tcp_getsockopt(struct sock *sk, int level,
2809                 int optname, char __user *optval, int __user *optlen)
2810 {
2811         struct inet_connection_sock *icsk = inet_csk(sk);
2812         struct tcp_sock *tp = tcp_sk(sk);
2813         int val, len;
2814 
2815         if (get_user(len, optlen))
2816                 return -EFAULT;
2817 
2818         len = min_t(unsigned int, len, sizeof(int));
2819 
2820         if (len < 0)
2821                 return -EINVAL;
2822 
2823         switch (optname) {
2824         case TCP_MAXSEG:
2825                 val = tp->mss_cache;
2826                 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2827                         val = tp->rx_opt.user_mss;
2828                 if (tp->repair)
2829                         val = tp->rx_opt.mss_clamp;
2830                 break;
2831         case TCP_NODELAY:
2832                 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2833                 break;
2834         case TCP_CORK:
2835                 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2836                 break;
2837         case TCP_KEEPIDLE:
2838                 val = keepalive_time_when(tp) / HZ;
2839                 break;
2840         case TCP_KEEPINTVL:
2841                 val = keepalive_intvl_when(tp) / HZ;
2842                 break;
2843         case TCP_KEEPCNT:
2844                 val = keepalive_probes(tp);
2845                 break;
2846         case TCP_SYNCNT:
2847                 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2848                 break;
2849         case TCP_LINGER2:
2850                 val = tp->linger2;
2851                 if (val >= 0)
2852                         val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2853                 break;
2854         case TCP_DEFER_ACCEPT:
2855                 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2856                                       TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2857                 break;
2858         case TCP_WINDOW_CLAMP:
2859                 val = tp->window_clamp;
2860                 break;
2861         case TCP_INFO: {
2862                 struct tcp_info info;
2863 
2864                 if (get_user(len, optlen))
2865                         return -EFAULT;
2866 
2867                 tcp_get_info(sk, &info);
2868 
2869                 len = min_t(unsigned int, len, sizeof(info));
2870                 if (put_user(len, optlen))
2871                         return -EFAULT;
2872                 if (copy_to_user(optval, &info, len))
2873                         return -EFAULT;
2874                 return 0;
2875         }
2876         case TCP_QUICKACK:
2877                 val = !icsk->icsk_ack.pingpong;
2878                 break;
2879 
2880         case TCP_CONGESTION:
2881                 if (get_user(len, optlen))
2882                         return -EFAULT;
2883                 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2884                 if (put_user(len, optlen))
2885                         return -EFAULT;
2886                 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2887                         return -EFAULT;
2888                 return 0;
2889 
2890         case TCP_THIN_LINEAR_TIMEOUTS:
2891                 val = tp->thin_lto;
2892                 break;
2893         case TCP_THIN_DUPACK:
2894                 val = tp->thin_dupack;
2895                 break;
2896 
2897         case TCP_REPAIR:
2898                 val = tp->repair;
2899                 break;
2900 
2901         case TCP_REPAIR_QUEUE:
2902                 if (tp->repair)
2903                         val = tp->repair_queue;
2904                 else
2905                         return -EINVAL;
2906                 break;
2907 
2908         case TCP_QUEUE_SEQ:
2909                 if (tp->repair_queue == TCP_SEND_QUEUE)
2910                         val = tp->write_seq;
2911                 else if (tp->repair_queue == TCP_RECV_QUEUE)
2912                         val = tp->rcv_nxt;
2913                 else
2914                         return -EINVAL;
2915                 break;
2916 
2917         case TCP_USER_TIMEOUT:
2918                 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2919                 break;
2920 
2921         case TCP_FASTOPEN:
2922                 if (icsk->icsk_accept_queue.fastopenq != NULL)
2923                         val = icsk->icsk_accept_queue.fastopenq->max_qlen;
2924                 else
2925                         val = 0;
2926                 break;
2927 
2928         case TCP_TIMESTAMP:
2929                 val = tcp_time_stamp + tp->tsoffset;
2930                 break;
2931         case TCP_NOTSENT_LOWAT:
2932                 val = tp->notsent_lowat;
2933                 break;
2934         default:
2935                 return -ENOPROTOOPT;
2936         }
2937 
2938         if (put_user(len, optlen))
2939                 return -EFAULT;
2940         if (copy_to_user(optval, &val, len))
2941                 return -EFAULT;
2942         return 0;
2943 }
2944 
2945 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2946                    int __user *optlen)
2947 {
2948         struct inet_connection_sock *icsk = inet_csk(sk);
2949 
2950         if (level != SOL_TCP)
2951                 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2952                                                      optval, optlen);
2953         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2954 }
2955 EXPORT_SYMBOL(tcp_getsockopt);
2956 
2957 #ifdef CONFIG_COMPAT
2958 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2959                           char __user *optval, int __user *optlen)
2960 {
2961         if (level != SOL_TCP)
2962                 return inet_csk_compat_getsockopt(sk, level, optname,
2963                                                   optval, optlen);
2964         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2965 }
2966 EXPORT_SYMBOL(compat_tcp_getsockopt);
2967 #endif
2968 
2969 #ifdef CONFIG_TCP_MD5SIG
2970 static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
2971 static DEFINE_MUTEX(tcp_md5sig_mutex);
2972 
2973 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2974 {
2975         int cpu;
2976 
2977         for_each_possible_cpu(cpu) {
2978                 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2979 
2980                 if (p->md5_desc.tfm)
2981                         crypto_free_hash(p->md5_desc.tfm);
2982         }
2983         free_percpu(pool);
2984 }
2985 
2986 static void __tcp_alloc_md5sig_pool(void)
2987 {
2988         int cpu;
2989         struct tcp_md5sig_pool __percpu *pool;
2990 
2991         pool = alloc_percpu(struct tcp_md5sig_pool);
2992         if (!pool)
2993                 return;
2994 
2995         for_each_possible_cpu(cpu) {
2996                 struct crypto_hash *hash;
2997 
2998                 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2999                 if (IS_ERR_OR_NULL(hash))
3000                         goto out_free;
3001 
3002                 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
3003         }
3004         /* before setting tcp_md5sig_pool, we must commit all writes
3005          * to memory. See ACCESS_ONCE() in tcp_get_md5sig_pool()
3006          */
3007         smp_wmb();
3008         tcp_md5sig_pool = pool;
3009         return;
3010 out_free:
3011         __tcp_free_md5sig_pool(pool);
3012 }
3013 
3014 bool tcp_alloc_md5sig_pool(void)
3015 {
3016         if (unlikely(!tcp_md5sig_pool)) {
3017                 mutex_lock(&tcp_md5sig_mutex);
3018 
3019                 if (!tcp_md5sig_pool)
3020                         __tcp_alloc_md5sig_pool();
3021 
3022                 mutex_unlock(&tcp_md5sig_mutex);
3023         }
3024         return tcp_md5sig_pool != NULL;
3025 }
3026 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3027 
3028 
3029 /**
3030  *      tcp_get_md5sig_pool - get md5sig_pool for this user
3031  *
3032  *      We use a percpu structure, so on success we return with preemption
3033  *      and BH disabled, to make sure that another thread or softirq handler
3034  *      won't try to use the same context.
3035  */
3036 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3037 {
3038         struct tcp_md5sig_pool __percpu *p;
3039 
3040         local_bh_disable();
3041         p = ACCESS_ONCE(tcp_md5sig_pool);
3042         if (p)
3043                 return __this_cpu_ptr(p);
3044 
3045         local_bh_enable();
3046         return NULL;
3047 }
3048 EXPORT_SYMBOL(tcp_get_md5sig_pool);
3049 
3050 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3051                         const struct tcphdr *th)
3052 {
3053         struct scatterlist sg;
3054         struct tcphdr hdr;
3055         int err;
3056 
3057         /* We are not allowed to change tcphdr, make a local copy */
3058         memcpy(&hdr, th, sizeof(hdr));
3059         hdr.check = 0;
3060 
3061         /* options aren't included in the hash */
3062         sg_init_one(&sg, &hdr, sizeof(hdr));
3063         err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
3064         return err;
3065 }
3066 EXPORT_SYMBOL(tcp_md5_hash_header);
3067 
3068 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3069                           const struct sk_buff *skb, unsigned int header_len)
3070 {
3071         struct scatterlist sg;
3072         const struct tcphdr *tp = tcp_hdr(skb);
3073         struct hash_desc *desc = &hp->md5_desc;
3074         unsigned int i;
3075         const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3076                                            skb_headlen(skb) - header_len : 0;
3077         const struct skb_shared_info *shi = skb_shinfo(skb);
3078         struct sk_buff *frag_iter;
3079 
3080         sg_init_table(&sg, 1);
3081 
3082         sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3083         if (crypto_hash_update(desc, &sg, head_data_len))
3084                 return 1;
3085 
3086         for (i = 0; i < shi->nr_frags; ++i) {
3087                 const struct skb_frag_struct *f = &shi->frags[i];
3088                 unsigned int offset = f->page_offset;
3089                 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3090 
3091                 sg_set_page(&sg, page, skb_frag_size(f),
3092                             offset_in_page(offset));
3093                 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3094                         return 1;
3095         }
3096 
3097         skb_walk_frags(skb, frag_iter)
3098                 if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3099                         return 1;
3100 
3101         return 0;
3102 }
3103 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3104 
3105 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3106 {
3107         struct scatterlist sg;
3108 
3109         sg_init_one(&sg, key->key, key->keylen);
3110         return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3111 }
3112 EXPORT_SYMBOL(tcp_md5_hash_key);
3113 
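A hedged sketch of how these helpers are typically strung together by a caller such as tcp_v4_md5_hash_skb() in net/ipv4/tcp_ipv4.c: grab the per-cpu pool (BH is now disabled), feed the header, payload and key into the hash, then release the pool. The function below is illustrative, not part of this file, and omits the address-family pseudo-header step that real callers hash between crypto_hash_init() and the TCP header.

    /* Illustrative only. */
    static int example_md5_hash_skb(u8 *md5_hash, const struct tcp_md5sig_key *key,
                                    const struct sk_buff *skb)
    {
            const struct tcphdr *th = tcp_hdr(skb);
            struct tcp_md5sig_pool *hp;
            struct hash_desc *desc;

            hp = tcp_get_md5sig_pool();             /* preemption + BH now off */
            if (!hp)
                    return 1;
            desc = &hp->md5_desc;

            if (crypto_hash_init(desc) ||
                tcp_md5_hash_header(hp, th) ||                    /* checksum-cleared copy */
                tcp_md5_hash_skb_data(hp, skb, th->doff << 2) ||  /* payload only */
                tcp_md5_hash_key(hp, key) ||
                crypto_hash_final(desc, md5_hash))
                    goto clear_hash;

            tcp_put_md5sig_pool();                  /* re-enables BH */
            return 0;

    clear_hash:
            tcp_put_md5sig_pool();
            memset(md5_hash, 0, 16);
            return 1;
    }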
3114 #endif
3115 
3116 void tcp_done(struct sock *sk)
3117 {
3118         struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3119 
3120         if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3121                 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3122 
3123         tcp_set_state(sk, TCP_CLOSE);
3124         tcp_clear_xmit_timers(sk);
3125         if (req != NULL)
3126                 reqsk_fastopen_remove(sk, req, false);
3127 
3128         sk->sk_shutdown = SHUTDOWN_MASK;
3129 
3130         if (!sock_flag(sk, SOCK_DEAD))
3131                 sk->sk_state_change(sk);
3132         else
3133                 inet_csk_destroy_sock(sk);
3134 }
3135 EXPORT_SYMBOL_GPL(tcp_done);
3136 
3137 extern struct tcp_congestion_ops tcp_reno;
3138 
3139 static __initdata unsigned long thash_entries;
3140 static int __init set_thash_entries(char *str)
3141 {
3142         ssize_t ret;
3143 
3144         if (!str)
3145                 return 0;
3146 
3147         ret = kstrtoul(str, 0, &thash_entries);
3148         if (ret)
3149                 return 0;
3150 
3151         return 1;
3152 }
3153 __setup("thash_entries=", set_thash_entries);
3154 
3155 static void tcp_init_mem(void)
3156 {
3157         unsigned long limit = nr_free_buffer_pages() / 8;
3158         limit = max(limit, 128UL);
3159         sysctl_tcp_mem[0] = limit / 4 * 3;
3160         sysctl_tcp_mem[1] = limit;
3161         sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
3162 }
3163 
3164 void __init tcp_init(void)
3165 {
3166         struct sk_buff *skb = NULL;
3167         unsigned long limit;
3168         int max_rshare, max_wshare, cnt;
3169         unsigned int i;
3170 
3171         BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3172 
3173         percpu_counter_init(&tcp_sockets_allocated, 0);
3174         percpu_counter_init(&tcp_orphan_count, 0);
3175         tcp_hashinfo.bind_bucket_cachep =
3176                 kmem_cache_create("tcp_bind_bucket",
3177                                   sizeof(struct inet_bind_bucket), 0,
3178                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3179 
3180         /* Size and allocate the main established and bind bucket
3181          * hash tables.
3182          *
3183          * The methodology is similar to that of the buffer cache.
3184          */
3185         tcp_hashinfo.ehash =
3186                 alloc_large_system_hash("TCP established",
3187                                         sizeof(struct inet_ehash_bucket),
3188                                         thash_entries,
3189                                         17, /* one slot per 128 KB of memory */
3190                                         0,
3191                                         NULL,
3192                                         &tcp_hashinfo.ehash_mask,
3193                                         0,
3194                                         thash_entries ? 0 : 512 * 1024);
3195         for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3196                 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3197 
3198         if (inet_ehash_locks_alloc(&tcp_hashinfo))
3199                 panic("TCP: failed to alloc ehash_locks");
3200         tcp_hashinfo.bhash =
3201                 alloc_large_system_hash("TCP bind",
3202                                         sizeof(struct inet_bind_hashbucket),
3203                                         tcp_hashinfo.ehash_mask + 1,
3204                                         17, /* one slot per 128 KB of memory */
3205                                         0,
3206                                         &tcp_hashinfo.bhash_size,
3207                                         NULL,
3208                                         0,
3209                                         64 * 1024);
3210         tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3211         for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3212                 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3213                 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3214         }
3215 
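A rough, hedged illustration of the sizing above (exact figures depend on the machine's low memory and on the thash_entries= boot parameter): the scale argument of 17 requests roughly one hash slot per 128 KB (2^17 bytes) of memory, so a box with about 4 GB of low memory ends up with on the order of 32K established-hash buckets. When thash_entries= is not given, the established table is capped at 512K entries, and the bind table, sized from ehash_mask + 1, is capped at 64K entries.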
3216 
3217         cnt = tcp_hashinfo.ehash_mask + 1;
3218 
3219         tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3220         sysctl_tcp_max_orphans = cnt / 2;
3221         sysctl_max_syn_backlog = max(128, cnt / 256);
3222 
3223         tcp_init_mem();
3224         /* Set per-socket limits to no more than 1/128 the pressure threshold */
3225         limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3226         max_wshare = min(4UL*1024*1024, limit);
3227         max_rshare = min(6UL*1024*1024, limit);
3228 
3229         sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3230         sysctl_tcp_wmem[1] = 16*1024;
3231         sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3232 
3233         sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3234         sysctl_tcp_rmem[1] = 87380;
3235         sysctl_tcp_rmem[2] = max(87380, max_rshare);
3236 
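A quick worked example of the per-socket caps above, assuming 4 KB pages (actual values vary with available memory): with roughly 4 GB of free buffer pages, limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7) is about 4 GB / 128 ≈ 32 MB, so max_wshare and max_rshare hit their 4 MB and 6 MB ceilings and tcp_wmem[2] / tcp_rmem[2] become 4 MB and 6 MB. On a small machine with, say, 256 MB, the same formula yields about 2 MB, and both per-socket maxima drop to roughly 2 MB.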
3237         pr_info("Hash tables configured (established %u bind %u)\n",
3238                 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3239 
3240         tcp_metrics_init();
3241 
3242         tcp_register_congestion_control(&tcp_reno);
3243 
3244         tcp_tasklet_init();
3245 }
3246 
