
Linux/net/ipv4/tcp.c

  1 /*
  2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
  3  *              operating system.  INET is implemented using the  BSD Socket
  4  *              interface as the means of communication with the user level.
  5  *
  6  *              Implementation of the Transmission Control Protocol (TCP).
  7  *
  8  * Authors:     Ross Biro
  9  *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 10  *              Mark Evans, <evansmp@uhura.aston.ac.uk>
 11  *              Corey Minyard <wf-rch!minyard@relay.EU.net>
 12  *              Florian La Roche, <flla@stud.uni-sb.de>
 13  *              Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
 14  *              Linus Torvalds, <torvalds@cs.helsinki.fi>
 15  *              Alan Cox, <gw4pts@gw4pts.ampr.org>
 16  *              Matthew Dillon, <dillon@apollo.west.oic.com>
 17  *              Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 18  *              Jorge Cwik, <jorge@laser.satlink.net>
 19  *
 20  * Fixes:
 21  *              Alan Cox        :       Numerous verify_area() calls
 22  *              Alan Cox        :       Set the ACK bit on a reset
 23  *              Alan Cox        :       Stopped it crashing if it closed while
 24  *                                      sk->inuse=1 and was trying to connect
 25  *                                      (tcp_err()).
 26  *              Alan Cox        :       All icmp error handling was broken
 27  *                                      pointers passed were wrong and the
 28  *                                      socket was looked up backwards. Nobody
 29  *                                      tested any icmp error code obviously.
 30  *              Alan Cox        :       tcp_err() now handled properly. It
 31  *                                      wakes people on errors. poll
 32  *                                      behaves and the icmp error race
 33  *                                      has gone by moving it into sock.c
 34  *              Alan Cox        :       tcp_send_reset() fixed to work for
 35  *                                      everything not just packets for
 36  *                                      unknown sockets.
 37  *              Alan Cox        :       tcp option processing.
 38  *              Alan Cox        :       Reset tweaked (still not 100%) [Had
 39  *                                      syn rule wrong]
 40  *              Herp Rosmanith  :       More reset fixes
 41  *              Alan Cox        :       No longer acks invalid rst frames.
 42  *                                      Acking any kind of RST is right out.
 43  *              Alan Cox        :       Sets an ignore me flag on an rst
 44  *                                      receive, otherwise odd bits of prattle
 45  *                                      still escape
 46  *              Alan Cox        :       Fixed another acking RST frame bug.
 47  *                                      Should stop LAN workplace lockups.
 48  *              Alan Cox        :       Some tidyups using the new skb list
 49  *                                      facilities
 50  *              Alan Cox        :       sk->keepopen now seems to work
 51  *              Alan Cox        :       Pulls options out correctly on accepts
 52  *              Alan Cox        :       Fixed assorted sk->rqueue->next errors
 53  *              Alan Cox        :       PSH doesn't end a TCP read. Switched a
 54  *                                      bit to skb ops.
 55  *              Alan Cox        :       Tidied tcp_data to avoid a potential
 56  *                                      nasty.
 57  *              Alan Cox        :       Added some better commenting, as the
 58  *                                      tcp is hard to follow
 59  *              Alan Cox        :       Removed incorrect check for 20 * psh
 60  *      Michael O'Reilly        :       ack < copied bug fix.
 61  *      Johannes Stille         :       Misc tcp fixes (not all in yet).
 62  *              Alan Cox        :       FIN with no memory -> CRASH
 63  *              Alan Cox        :       Added socket option proto entries.
 64  *                                      Also added awareness of them to accept.
 65  *              Alan Cox        :       Added TCP options (SOL_TCP)
 66  *              Alan Cox        :       Switched wakeup calls to callbacks,
 67  *                                      so the kernel can layer network
 68  *                                      sockets.
 69  *              Alan Cox        :       Use ip_tos/ip_ttl settings.
 70  *              Alan Cox        :       Handle FIN (more) properly (we hope).
 71  *              Alan Cox        :       RST frames sent on unsynchronised
 72  *                                      state ack error.
 73  *              Alan Cox        :       Put in missing check for SYN bit.
 74  *              Alan Cox        :       Added tcp_select_window() aka NET2E
 75  *                                      window non shrink trick.
 76  *              Alan Cox        :       Added a couple of small NET2E timer
 77  *                                      fixes
 78  *              Charles Hedrick :       TCP fixes
 79  *              Toomas Tamm     :       TCP window fixes
 80  *              Alan Cox        :       Small URG fix to rlogin ^C ack fight
 81  *              Charles Hedrick :       Rewrote most of it to actually work
 82  *              Linus           :       Rewrote tcp_read() and URG handling
 83  *                                      completely
 84  *              Gerhard Koerting:       Fixed some missing timer handling
 85  *              Matthew Dillon  :       Reworked TCP machine states as per RFC
 86  *              Gerhard Koerting:       PC/TCP workarounds
 87  *              Adam Caldwell   :       Assorted timer/timing errors
 88  *              Matthew Dillon  :       Fixed another RST bug
 89  *              Alan Cox        :       Move to kernel side addressing changes.
 90  *              Alan Cox        :       Beginning work on TCP fastpathing
 91  *                                      (not yet usable)
 92  *              Arnt Gulbrandsen:       Turbocharged tcp_check() routine.
 93  *              Alan Cox        :       TCP fast path debugging
 94  *              Alan Cox        :       Window clamping
 95  *              Michael Riepe   :       Bug in tcp_check()
 96  *              Matt Dillon     :       More TCP improvements and RST bug fixes
 97  *                                      Yet more small nasties removed from the
 98  *                                      TCP code (Be very nice to this man if
 99  *                                      tcp finally works 100%) 8)
100  *              Alan Cox        :       BSD accept semantics.
101  *              Alan Cox        :       Reset on closedown bug.
102  *      Peter De Schrijver      :       ENOTCONN check missing in tcp_sendto().
103  *              Michael Pall    :       Handle poll() after URG properly in
104  *                                      all cases.
105  *              Michael Pall    :       Undo the last fix in tcp_read_urg()
106  *                                      (multi URG PUSH broke rlogin).
107  *              Michael Pall    :       Fix the multi URG PUSH problem in
108  *                                      tcp_readable(), poll() after URG
109  *                                      works now.
110  *              Michael Pall    :       recv(...,MSG_OOB) never blocks in the
111  *                                      BSD api.
112  *              Alan Cox        :       Changed the semantics of sk->socket to
113  *                                      fix a race and a signal problem with
114  *                                      accept() and async I/O.
115  *              Alan Cox        :       Relaxed the rules on tcp_sendto().
116  *              Yury Shevchuk   :       Really fixed accept() blocking problem.
117  *              Craig I. Hagan  :       Allow for BSD compatible TIME_WAIT for
118  *                                      clients/servers which listen in on
119  *                                      fixed ports.
120  *              Alan Cox        :       Cleaned the above up and shrank it to
121  *                                      a sensible code size.
122  *              Alan Cox        :       Self connect lockup fix.
123  *              Alan Cox        :       No connect to multicast.
124  *              Ross Biro       :       Close unaccepted children on master
125  *                                      socket close.
126  *              Alan Cox        :       Reset tracing code.
127  *              Alan Cox        :       Spurious resets on shutdown.
128  *              Alan Cox        :       Giant 15 minute/60 second timer error
129  *              Alan Cox        :       Small whoops in polling before an
130  *                                      accept.
131  *              Alan Cox        :       Kept the state trace facility since
132  *                                      it's handy for debugging.
133  *              Alan Cox        :       More reset handler fixes.
134  *              Alan Cox        :       Started rewriting the code based on
135  *                                      the RFCs; for other useful protocol
136  *                                      references see Comer, KA9Q NOS, and
137  *                                      for a reference on the difference
138  *                                      between specifications and how BSD
139  *                                      works, see the 4.4lite source.
140  *              A.N.Kuznetsov   :       Don't time wait on completion of tidy
141  *                                      close.
142  *              Linus Torvalds  :       Fin/Shutdown & copied_seq changes.
143  *              Linus Torvalds  :       Fixed BSD port reuse to work first syn
144  *              Alan Cox        :       Reimplemented timers as per the RFC
145  *                                      and using multiple timers for sanity.
146  *              Alan Cox        :       Small bug fixes, and a lot of new
147  *                                      comments.
148  *              Alan Cox        :       Fixed dual reader crash by locking
149  *                                      the buffers (much like datagram.c)
150  *              Alan Cox        :       Fixed stuck sockets in probe. A probe
151  *                                      now gets fed up with retrying without
152  *                                      any answer (even a no-space one).
153  *              Alan Cox        :       Extracted closing code better
154  *              Alan Cox        :       Fixed the closing state machine to
155  *                                      resemble the RFC.
156  *              Alan Cox        :       More 'per spec' fixes.
157  *              Jorge Cwik      :       Even faster checksumming.
158  *              Alan Cox        :       tcp_data() doesn't ack illegal PSH
159  *                                      only frames. At least one pc tcp stack
160  *                                      generates them.
161  *              Alan Cox        :       Cache last socket.
162  *              Alan Cox        :       Per route irtt.
163  *              Matt Day        :       poll()->select() match BSD precisely on error
164  *              Alan Cox        :       New buffers
165  *              Marc Tamsky     :       Various sk->prot->retransmits and
166  *                                      sk->retransmits misupdating fixed.
167  *                                      Fixed tcp_write_timeout: stuck close,
168  *                                      and TCP syn retries gets used now.
169  *              Mark Yarvis     :       In tcp_read_wakeup(), don't send an
170  *                                      ack if state is TCP_CLOSED.
171  *              Alan Cox        :       Look up device on a retransmit - routes may
172  *                                      change. Doesn't yet cope with MSS shrink right
173  *                                      but it's a start!
174  *              Marc Tamsky     :       Closing in closing fixes.
175  *              Mike Shaver     :       RFC1122 verifications.
176  *              Alan Cox        :       rcv_saddr errors.
177  *              Alan Cox        :       Block double connect().
178  *              Alan Cox        :       Small hooks for enSKIP.
179  *              Alexey Kuznetsov:       Path MTU discovery.
180  *              Alan Cox        :       Support soft errors.
181  *              Alan Cox        :       Fix MTU discovery pathological case
182  *                                      when the remote claims no mtu!
183  *              Marc Tamsky     :       TCP_CLOSE fix.
184  *              Colin (G3TNE)   :       Send a reset on syn ack replies in
185  *                                      window but wrong (fixes NT lpd problems)
186  *              Pedro Roque     :       Better TCP window handling, delayed ack.
187  *              Joerg Reuter    :       No modification of locked buffers in
188  *                                      tcp_do_retransmit()
189  *              Eric Schenk     :       Changed receiver side silly window
190  *                                      avoidance algorithm to BSD style
191  *                                      algorithm. This doubles throughput
192  *                                      against machines running Solaris,
193  *                                      and seems to result in general
194  *                                      improvement.
195  *      Stefan Magdalinski      :       adjusted tcp_readable() to fix FIONREAD
196  *      Willy Konynenberg       :       Transparent proxying support.
197  *      Mike McLagan            :       Routing by source
198  *              Keith Owens     :       Do proper merging with partial SKB's in
199  *                                      tcp_do_sendmsg to avoid burstiness.
200  *              Eric Schenk     :       Fix fast close down bug with
201  *                                      shutdown() followed by close().
202  *              Andi Kleen      :       Make poll agree with SIGIO
203  *      Salvatore Sanfilippo    :       Support SO_LINGER with linger == 1 and
204  *                                      lingertime == 0 (RFC 793 ABORT Call)
205  *      Hirokazu Takahashi      :       Use copy_from_user() instead of
206  *                                      csum_and_copy_from_user() if possible.
207  *
208  *              This program is free software; you can redistribute it and/or
209  *              modify it under the terms of the GNU General Public License
210  *              as published by the Free Software Foundation; either version
211  *              2 of the License, or (at your option) any later version.
212  *
213  * Description of States:
214  *
215  *      TCP_SYN_SENT            sent a connection request, waiting for ack
216  *
217  *      TCP_SYN_RECV            received a connection request, sent ack,
218  *                              waiting for final ack in three-way handshake.
219  *
220  *      TCP_ESTABLISHED         connection established
221  *
222  *      TCP_FIN_WAIT1           our side has shutdown, waiting to complete
223  *                              transmission of remaining buffered data
224  *
225  *      TCP_FIN_WAIT2           all buffered data sent, waiting for remote
226  *                              to shutdown
227  *
228  *      TCP_CLOSING             both sides have shutdown but we still have
229  *                              data we have to finish sending
230  *
231  *      TCP_TIME_WAIT           timeout to catch resent junk before entering
232  *                              closed, can only be entered from FIN_WAIT2
233  *                              or CLOSING.  Required because the other end
234  *                              may not have gotten our last ACK causing it
235  *                              to retransmit the data packet (which we ignore)
236  *
237  *      TCP_CLOSE_WAIT          remote side has shutdown and is waiting for
238  *                              us to finish writing our data and to shutdown
239  *                              (we have to close() to move on to LAST_ACK)
240  *
241  *      TCP_LAST_ACK            our side has shutdown after remote has
242  *                              shutdown.  There may still be data in our
243  *                              buffer that we have to finish sending
244  *
245  *      TCP_CLOSE               socket is finished
246  */
247 
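
As a userspace aside, the state a socket currently sits in (per the table above) can be observed with the TCP_INFO socket option. A minimal sketch, assuming a connected socket fd and glibc's netinet/tcp.h definitions (tcp_state_of is an assumed helper name, not a kernel symbol):

        /* Userspace sketch, not part of tcp.c. */
        #include <netinet/in.h>
        #include <netinet/tcp.h>        /* struct tcp_info, TCP_ESTABLISHED, ... */
        #include <sys/socket.h>

        /* Return the kernel's view of the TCP state, e.g. TCP_ESTABLISHED
         * or TCP_CLOSE_WAIT, or -1 on error. */
        static int tcp_state_of(int fd)
        {
                struct tcp_info info;
                socklen_t len = sizeof(info);

                if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &info, &len) < 0)
                        return -1;
                return info.tcpi_state;
        }
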
248 #define pr_fmt(fmt) "TCP: " fmt
249 
250 #include <linux/kernel.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/fs.h>
257 #include <linux/skbuff.h>
258 #include <linux/scatterlist.h>
259 #include <linux/splice.h>
260 #include <linux/net.h>
261 #include <linux/socket.h>
262 #include <linux/random.h>
263 #include <linux/bootmem.h>
264 #include <linux/highmem.h>
265 #include <linux/swap.h>
266 #include <linux/cache.h>
267 #include <linux/err.h>
268 #include <linux/crypto.h>
269 #include <linux/time.h>
270 #include <linux/slab.h>
271 
272 #include <net/icmp.h>
273 #include <net/inet_common.h>
274 #include <net/tcp.h>
275 #include <net/xfrm.h>
276 #include <net/ip.h>
277 #include <net/netdma.h>
278 #include <net/sock.h>
279 
280 #include <asm/uaccess.h>
281 #include <asm/ioctls.h>
282 #include <net/busy_poll.h>
283 
284 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
285 
286 int sysctl_tcp_min_tso_segs __read_mostly = 2;
287 
288 int sysctl_tcp_autocorking __read_mostly = 1;
289 
290 struct percpu_counter tcp_orphan_count;
291 EXPORT_SYMBOL_GPL(tcp_orphan_count);
292 
293 long sysctl_tcp_mem[3] __read_mostly;
294 int sysctl_tcp_wmem[3] __read_mostly;
295 int sysctl_tcp_rmem[3] __read_mostly;
296 
297 EXPORT_SYMBOL(sysctl_tcp_mem);
298 EXPORT_SYMBOL(sysctl_tcp_rmem);
299 EXPORT_SYMBOL(sysctl_tcp_wmem);
300 
301 atomic_long_t tcp_memory_allocated;     /* Current allocated memory. */
302 EXPORT_SYMBOL(tcp_memory_allocated);
303 
304 /*
305  * Current number of TCP sockets.
306  */
307 struct percpu_counter tcp_sockets_allocated;
308 EXPORT_SYMBOL(tcp_sockets_allocated);
309 
310 /*
311  * TCP splice context
312  */
313 struct tcp_splice_state {
314         struct pipe_inode_info *pipe;
315         size_t len;
316         unsigned int flags;
317 };
318 
319 /*
320  * Pressure flag: try to collapse.
321  * Technical note: it is used by multiple contexts non atomically.
322  * All the __sk_mem_schedule() is of this nature: accounting
323  * is strict, actions are advisory and have some latency.
324  */
325 int tcp_memory_pressure __read_mostly;
326 EXPORT_SYMBOL(tcp_memory_pressure);
327 
328 void tcp_enter_memory_pressure(struct sock *sk)
329 {
330         if (!tcp_memory_pressure) {
331                 NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPMEMORYPRESSURES);
332                 tcp_memory_pressure = 1;
333         }
334 }
335 EXPORT_SYMBOL(tcp_enter_memory_pressure);
336 
337 /* Convert seconds to retransmits based on initial and max timeout */
338 static u8 secs_to_retrans(int seconds, int timeout, int rto_max)
339 {
340         u8 res = 0;
341 
342         if (seconds > 0) {
343                 int period = timeout;
344 
345                 res = 1;
346                 while (seconds > period && res < 255) {
347                         res++;
348                         timeout <<= 1;
349                         if (timeout > rto_max)
350                                 timeout = rto_max;
351                         period += timeout;
352                 }
353         }
354         return res;
355 }
356 
357 /* Convert retransmits to seconds based on initial and max timeout */
358 static int retrans_to_secs(u8 retrans, int timeout, int rto_max)
359 {
360         int period = 0;
361 
362         if (retrans > 0) {
363                 period = timeout;
364                 while (--retrans) {
365                         timeout <<= 1;
366                         if (timeout > rto_max)
367                                 timeout = rto_max;
368                         period += timeout;
369                 }
370         }
371         return period;
372 }
373 
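
For intuition, here is the same doubling series as a standalone userspace sketch in plain seconds (the demo_* name is an assumption, not a kernel symbol). With a 1 s initial timeout capped at 120 s, 3 retransmits cover 1 + 2 + 4 = 7 s, and 8 retransmits cover 247 s because the cap kicks in on the last step; secs_to_retrans() above inverts this mapping:

        /* Userspace sketch, not part of tcp.c. */
        #include <stdio.h>

        /* Mirrors retrans_to_secs() above, with time in whole seconds. */
        static int demo_retrans_to_secs(int retrans, int timeout, int rto_max)
        {
                int period = 0;

                if (retrans > 0) {
                        period = timeout;
                        while (--retrans) {
                                timeout <<= 1;
                                if (timeout > rto_max)
                                        timeout = rto_max;
                                period += timeout;
                        }
                }
                return period;
        }

        int main(void)
        {
                int r;

                for (r = 1; r <= 8; r++)   /* prints 1, 3, 7, 15, 31, 63, 127, 247 */
                        printf("%d retransmits ~ %d s\n",
                               r, demo_retrans_to_secs(r, 1, 120));
                return 0;
        }
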
374 /* Address-family independent initialization for a tcp_sock.
375  *
376  * NOTE: A lot of things are set to zero explicitly by the call to
377  *       sk_alloc(), so they need not be done here.
378  */
379 void tcp_init_sock(struct sock *sk)
380 {
381         struct inet_connection_sock *icsk = inet_csk(sk);
382         struct tcp_sock *tp = tcp_sk(sk);
383 
384         __skb_queue_head_init(&tp->out_of_order_queue);
385         tcp_init_xmit_timers(sk);
386         tcp_prequeue_init(tp);
387         INIT_LIST_HEAD(&tp->tsq_node);
388 
389         icsk->icsk_rto = TCP_TIMEOUT_INIT;
390         tp->mdev_us = jiffies_to_usecs(TCP_TIMEOUT_INIT);
391 
392         /* So many TCP implementations out there (incorrectly) count the
393          * initial SYN frame in their delayed-ACK and congestion control
394          * algorithms that we must have the following bandaid to talk
395          * efficiently to them.  -DaveM
396          */
397         tp->snd_cwnd = TCP_INIT_CWND;
398 
399         /* See draft-stevens-tcpca-spec-01 for discussion of the
400          * initialization of these values.
401          */
402         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
403         tp->snd_cwnd_clamp = ~0;
404         tp->mss_cache = TCP_MSS_DEFAULT;
405 
406         tp->reordering = sysctl_tcp_reordering;
407         tcp_enable_early_retrans(tp);
408         icsk->icsk_ca_ops = &tcp_init_congestion_ops;
409 
410         tp->tsoffset = 0;
411 
412         sk->sk_state = TCP_CLOSE;
413 
414         sk->sk_write_space = sk_stream_write_space;
415         sock_set_flag(sk, SOCK_USE_WRITE_QUEUE);
416 
417         icsk->icsk_sync_mss = tcp_sync_mss;
418 
419         sk->sk_sndbuf = sysctl_tcp_wmem[1];
420         sk->sk_rcvbuf = sysctl_tcp_rmem[1];
421 
422         local_bh_disable();
423         sock_update_memcg(sk);
424         sk_sockets_allocated_inc(sk);
425         local_bh_enable();
426 }
427 EXPORT_SYMBOL(tcp_init_sock);
428 
429 /*
430  *      Wait for a TCP event.
431  *
432  *      Note that we don't need to lock the socket, as the upper poll layers
433  *      take care of normal races (between the test and the event) and we don't
434  *      go look at any of the socket buffers directly.
435  */
436 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
437 {
438         unsigned int mask;
439         struct sock *sk = sock->sk;
440         const struct tcp_sock *tp = tcp_sk(sk);
441 
442         sock_rps_record_flow(sk);
443 
444         sock_poll_wait(file, sk_sleep(sk), wait);
445         if (sk->sk_state == TCP_LISTEN)
446                 return inet_csk_listen_poll(sk);
447 
448         /* Socket is not locked. We are protected from async events
449          * by poll logic and correct handling of state changes
450          * made by other threads is impossible in any case.
451          */
452 
453         mask = 0;
454 
455         /*
456          * POLLHUP is certainly not done right. But poll() doesn't
457          * have a notion of HUP in just one direction, and for a
458          * socket the read side is more interesting.
459          *
460          * Some poll() documentation says that POLLHUP is incompatible
461          * with the POLLOUT/POLLWR flags, so somebody should check all
462          * this. But be careful: it tends to be safer to return too many
463          * bits than too few, and you can easily break real applications
464          * if you don't tell them that something has hung up!
465          *
466          * Check-me.
467          *
468          * Check number 1. POLLHUP is an _UNMASKABLE_ event (see UNIX98 and
469          * our fs/select.c). It means that after we received EOF,
470          * poll always returns immediately, making it impossible to poll() for
471          * write() in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
472          * if and only if shutdown has been made in both directions.
473          * Actually, it is interesting to look at how Solaris and DUX
474          * solve this dilemma. I would prefer it if POLLHUP were maskable;
475          * then we could set it on SND_SHUTDOWN. BTW the examples given
476          * in Stevens' books assume exactly this behaviour, which explains
477          * why POLLHUP is incompatible with POLLOUT.    --ANK
478          *
479          * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
480          * blocking on a fresh not-connected or disconnected socket. --ANK
481          */
482         if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
483                 mask |= POLLHUP;
484         if (sk->sk_shutdown & RCV_SHUTDOWN)
485                 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
486 
487         /* Connected or passive Fast Open socket? */
488         if (sk->sk_state != TCP_SYN_SENT &&
489             (sk->sk_state != TCP_SYN_RECV || tp->fastopen_rsk != NULL)) {
490                 int target = sock_rcvlowat(sk, 0, INT_MAX);
491 
492                 if (tp->urg_seq == tp->copied_seq &&
493                     !sock_flag(sk, SOCK_URGINLINE) &&
494                     tp->urg_data)
495                         target++;
496 
497                 /* Potential race condition. If the read of tp below
498                  * escapes above the sk->sk_state check, we can be
499                  * spuriously awakened in SYN_* states. */
500                 if (tp->rcv_nxt - tp->copied_seq >= target)
501                         mask |= POLLIN | POLLRDNORM;
502 
503                 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
504                         if (sk_stream_is_writeable(sk)) {
505                                 mask |= POLLOUT | POLLWRNORM;
506                         } else {  /* send SIGIO later */
507                                 set_bit(SOCK_ASYNC_NOSPACE,
508                                         &sk->sk_socket->flags);
509                                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
510 
511                                 /* Race breaker. If space is freed after
512                                  * wspace test but before the flags are set,
513                                  * IO signal will be lost.
514                                  */
515                                 if (sk_stream_is_writeable(sk))
516                                         mask |= POLLOUT | POLLWRNORM;
517                         }
518                 } else
519                         mask |= POLLOUT | POLLWRNORM;
520 
521                 if (tp->urg_data & TCP_URG_VALID)
522                         mask |= POLLPRI;
523         }
524         /* This barrier is coupled with smp_wmb() in tcp_reset() */
525         smp_rmb();
526         if (sk->sk_err)
527                 mask |= POLLERR;
528 
529         return mask;
530 }
531 EXPORT_SYMBOL(tcp_poll);
532 
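
From the other side of the API, a hedged userspace sketch of how the mask computed by tcp_poll() is typically consumed (the helper name is assumed; a connected socket fd is presumed, and POLLRDHUP needs _GNU_SOURCE):

        /* Userspace sketch, not part of tcp.c. */
        #define _GNU_SOURCE
        #include <poll.h>
        #include <stdio.h>

        static void wait_for_tcp_event(int fd)
        {
                struct pollfd pfd = {
                        .fd     = fd,
                        .events = POLLIN | POLLOUT | POLLRDHUP,
                };

                if (poll(&pfd, 1, 5000 /* ms */) <= 0)
                        return;                         /* timeout or poll() error */
                if (pfd.revents & (POLLHUP | POLLERR))
                        printf("hung up or socket error\n");
                if (pfd.revents & POLLRDHUP)
                        printf("peer sent FIN; read side is done\n");
                if (pfd.revents & POLLIN)
                        printf("at least SO_RCVLOWAT bytes are readable\n");
                if (pfd.revents & POLLOUT)
                        printf("socket is writeable again\n");
        }
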
533 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
534 {
535         struct tcp_sock *tp = tcp_sk(sk);
536         int answ;
537         bool slow;
538 
539         switch (cmd) {
540         case SIOCINQ:
541                 if (sk->sk_state == TCP_LISTEN)
542                         return -EINVAL;
543 
544                 slow = lock_sock_fast(sk);
545                 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
546                         answ = 0;
547                 else if (sock_flag(sk, SOCK_URGINLINE) ||
548                          !tp->urg_data ||
549                          before(tp->urg_seq, tp->copied_seq) ||
550                          !before(tp->urg_seq, tp->rcv_nxt)) {
551 
552                         answ = tp->rcv_nxt - tp->copied_seq;
553 
554                         /* Subtract 1, if FIN was received */
555                         if (answ && sock_flag(sk, SOCK_DONE))
556                                 answ--;
557                 } else
558                         answ = tp->urg_seq - tp->copied_seq;
559                 unlock_sock_fast(sk, slow);
560                 break;
561         case SIOCATMARK:
562                 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
563                 break;
564         case SIOCOUTQ:
565                 if (sk->sk_state == TCP_LISTEN)
566                         return -EINVAL;
567 
568                 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
569                         answ = 0;
570                 else
571                         answ = tp->write_seq - tp->snd_una;
572                 break;
573         case SIOCOUTQNSD:
574                 if (sk->sk_state == TCP_LISTEN)
575                         return -EINVAL;
576 
577                 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
578                         answ = 0;
579                 else
580                         answ = tp->write_seq - tp->snd_nxt;
581                 break;
582         default:
583                 return -ENOIOCTLCMD;
584         }
585 
586         return put_user(answ, (int __user *)arg);
587 }
588 EXPORT_SYMBOL(tcp_ioctl);
589 
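
A short userspace sketch of the three queue-depth ioctls handled above (the helper name is assumed; a connected TCP socket fd is presumed):

        /* Userspace sketch, not part of tcp.c. */
        #include <stdio.h>
        #include <sys/ioctl.h>
        #include <linux/sockios.h>      /* SIOCINQ, SIOCOUTQ, SIOCOUTQNSD */

        static void print_queue_depths(int fd)
        {
                int inq = 0, outq = 0, notsent = 0;

                ioctl(fd, SIOCINQ, &inq);         /* unread bytes (rcv_nxt - copied_seq) */
                ioctl(fd, SIOCOUTQ, &outq);       /* unacked bytes (write_seq - snd_una) */
                ioctl(fd, SIOCOUTQNSD, &notsent); /* unsent bytes (write_seq - snd_nxt) */
                printf("inq=%d outq=%d not-sent=%d\n", inq, outq, notsent);
        }
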
590 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
591 {
592         TCP_SKB_CB(skb)->tcp_flags |= TCPHDR_PSH;
593         tp->pushed_seq = tp->write_seq;
594 }
595 
596 static inline bool forced_push(const struct tcp_sock *tp)
597 {
598         return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
599 }
600 
601 static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
602 {
603         struct tcp_sock *tp = tcp_sk(sk);
604         struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
605 
606         skb->csum    = 0;
607         tcb->seq     = tcb->end_seq = tp->write_seq;
608         tcb->tcp_flags = TCPHDR_ACK;
609         tcb->sacked  = 0;
610         skb_header_release(skb);
611         tcp_add_write_queue_tail(sk, skb);
612         sk->sk_wmem_queued += skb->truesize;
613         sk_mem_charge(sk, skb->truesize);
614         if (tp->nonagle & TCP_NAGLE_PUSH)
615                 tp->nonagle &= ~TCP_NAGLE_PUSH;
616 }
617 
618 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags)
619 {
620         if (flags & MSG_OOB)
621                 tp->snd_up = tp->write_seq;
622 }
623 
624 /* If a not-yet-filled skb is pushed, do not send it if
625  * we have data packets in Qdisc or NIC queues:
626  * because TX completion will happen shortly, this gives a chance
627  * to coalesce future sendmsg() payload into this skb, without
628  * need for a timer, and with no latency trade-off.
629  * As packets containing a data payload have a bigger truesize
630  * than pure ack (dataless) packets, the last checks prevent
631  * autocorking if we only have an ACK in the Qdisc/NIC queues,
632  * or if TX completion was delayed after we processed the ACK packet.
633  */
634 static bool tcp_should_autocork(struct sock *sk, struct sk_buff *skb,
635                                 int size_goal)
636 {
637         return skb->len < size_goal &&
638                sysctl_tcp_autocorking &&
639                skb != tcp_write_queue_head(sk) &&
640                atomic_read(&sk->sk_wmem_alloc) > skb->truesize;
641 }
642 
643 static void tcp_push(struct sock *sk, int flags, int mss_now,
644                      int nonagle, int size_goal)
645 {
646         struct tcp_sock *tp = tcp_sk(sk);
647         struct sk_buff *skb;
648 
649         if (!tcp_send_head(sk))
650                 return;
651 
652         skb = tcp_write_queue_tail(sk);
653         if (!(flags & MSG_MORE) || forced_push(tp))
654                 tcp_mark_push(tp, skb);
655 
656         tcp_mark_urg(tp, flags);
657 
658         if (tcp_should_autocork(sk, skb, size_goal)) {
659 
660                 /* avoid atomic op if TSQ_THROTTLED bit is already set */
661                 if (!test_bit(TSQ_THROTTLED, &tp->tsq_flags)) {
662                         NET_INC_STATS(sock_net(sk), LINUX_MIB_TCPAUTOCORKING);
663                         set_bit(TSQ_THROTTLED, &tp->tsq_flags);
664                 }
665                 /* It is possible TX completion already happened
666                  * before we set TSQ_THROTTLED.
667                  */
668                 if (atomic_read(&sk->sk_wmem_alloc) > skb->truesize)
669                         return;
670         }
671 
672         if (flags & MSG_MORE)
673                 nonagle = TCP_NAGLE_CORK;
674 
675         __tcp_push_pending_frames(sk, mss_now, nonagle);
676 }
677 
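
From userspace, the MSG_MORE branch above is reached like this; a minimal sketch (assumed helper name and buffers) of sending one logical message in two pieces without emitting a partial segment in between:

        /* Userspace sketch, not part of tcp.c. */
        #include <string.h>
        #include <sys/socket.h>

        static int send_two_part_message(int fd)
        {
                static const char hdr[]  = "HDR:";
                static const char body[] = "payload";

                /* MSG_MORE corks the not-yet-full segment... */
                if (send(fd, hdr, strlen(hdr), MSG_MORE) < 0)
                        return -1;
                /* ...and the final send() without it lets TCP push the frame. */
                if (send(fd, body, strlen(body), 0) < 0)
                        return -1;
                return 0;
        }
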
678 static int tcp_splice_data_recv(read_descriptor_t *rd_desc, struct sk_buff *skb,
679                                 unsigned int offset, size_t len)
680 {
681         struct tcp_splice_state *tss = rd_desc->arg.data;
682         int ret;
683 
684         ret = skb_splice_bits(skb, offset, tss->pipe, min(rd_desc->count, len),
685                               tss->flags);
686         if (ret > 0)
687                 rd_desc->count -= ret;
688         return ret;
689 }
690 
691 static int __tcp_splice_read(struct sock *sk, struct tcp_splice_state *tss)
692 {
693         /* Store TCP splice context information in read_descriptor_t. */
694         read_descriptor_t rd_desc = {
695                 .arg.data = tss,
696                 .count    = tss->len,
697         };
698 
699         return tcp_read_sock(sk, &rd_desc, tcp_splice_data_recv);
700 }
701 
702 /**
703  *  tcp_splice_read - splice data from TCP socket to a pipe
704  * @sock:       socket to splice from
705  * @ppos:       position (not valid)
706  * @pipe:       pipe to splice to
707  * @len:        number of bytes to splice
708  * @flags:      splice modifier flags
709  *
710  * Description:
711  *    Will read pages from given socket and fill them into a pipe.
712  *
713  **/
714 ssize_t tcp_splice_read(struct socket *sock, loff_t *ppos,
715                         struct pipe_inode_info *pipe, size_t len,
716                         unsigned int flags)
717 {
718         struct sock *sk = sock->sk;
719         struct tcp_splice_state tss = {
720                 .pipe = pipe,
721                 .len = len,
722                 .flags = flags,
723         };
724         long timeo;
725         ssize_t spliced;
726         int ret;
727 
728         sock_rps_record_flow(sk);
729         /*
730          * We can't seek on a socket input
731          */
732         if (unlikely(*ppos))
733                 return -ESPIPE;
734 
735         ret = spliced = 0;
736 
737         lock_sock(sk);
738 
739         timeo = sock_rcvtimeo(sk, sock->file->f_flags & O_NONBLOCK);
740         while (tss.len) {
741                 ret = __tcp_splice_read(sk, &tss);
742                 if (ret < 0)
743                         break;
744                 else if (!ret) {
745                         if (spliced)
746                                 break;
747                         if (sock_flag(sk, SOCK_DONE))
748                                 break;
749                         if (sk->sk_err) {
750                                 ret = sock_error(sk);
751                                 break;
752                         }
753                         if (sk->sk_shutdown & RCV_SHUTDOWN)
754                                 break;
755                         if (sk->sk_state == TCP_CLOSE) {
756                                 /*
757                                  * This occurs when the user tries to read
758                                  * from a never-connected socket.
759                                  */
760                                 if (!sock_flag(sk, SOCK_DONE))
761                                         ret = -ENOTCONN;
762                                 break;
763                         }
764                         if (!timeo) {
765                                 ret = -EAGAIN;
766                                 break;
767                         }
768                         sk_wait_data(sk, &timeo);
769                         if (signal_pending(current)) {
770                                 ret = sock_intr_errno(timeo);
771                                 break;
772                         }
773                         continue;
774                 }
775                 tss.len -= ret;
776                 spliced += ret;
777 
778                 if (!timeo)
779                         break;
780                 release_sock(sk);
781                 lock_sock(sk);
782 
783                 if (sk->sk_err || sk->sk_state == TCP_CLOSE ||
784                     (sk->sk_shutdown & RCV_SHUTDOWN) ||
785                     signal_pending(current))
786                         break;
787         }
788 
789         release_sock(sk);
790 
791         if (spliced)
792                 return spliced;
793 
794         return ret;
795 }
796 EXPORT_SYMBOL(tcp_splice_read);
797 
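
A hedged userspace sketch of the path above: splice() moves socket data into a pipe without copying through user memory, then a second splice() drains the pipe to another descriptor (names assumed; error handling trimmed):

        /* Userspace sketch, not part of tcp.c. */
        #define _GNU_SOURCE
        #include <fcntl.h>
        #include <unistd.h>

        static ssize_t relay_once(int sock, int out_fd)
        {
                int p[2];
                ssize_t n;

                if (pipe(p) < 0)
                        return -1;
                /* Socket -> pipe, then pipe -> destination fd. */
                n = splice(sock, NULL, p[1], NULL, 65536, SPLICE_F_MOVE);
                if (n > 0)
                        n = splice(p[0], NULL, out_fd, NULL, n, SPLICE_F_MOVE);
                close(p[0]);
                close(p[1]);
                return n;
        }
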
798 struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp)
799 {
800         struct sk_buff *skb;
801 
802         /* The TCP header must be at least 32-bit aligned.  */
803         size = ALIGN(size, 4);
804 
805         skb = alloc_skb_fclone(size + sk->sk_prot->max_header, gfp);
806         if (skb) {
807                 if (sk_wmem_schedule(sk, skb->truesize)) {
808                         skb_reserve(skb, sk->sk_prot->max_header);
809                         /*
810                          * Make sure that we have exactly size bytes
811                          * available to the caller, no more, no less.
812                          */
813                         skb->reserved_tailroom = skb->end - skb->tail - size;
814                         return skb;
815                 }
816                 __kfree_skb(skb);
817         } else {
818                 sk->sk_prot->enter_memory_pressure(sk);
819                 sk_stream_moderate_sndbuf(sk);
820         }
821         return NULL;
822 }
823 
824 static unsigned int tcp_xmit_size_goal(struct sock *sk, u32 mss_now,
825                                        int large_allowed)
826 {
827         struct tcp_sock *tp = tcp_sk(sk);
828         u32 xmit_size_goal, old_size_goal;
829 
830         xmit_size_goal = mss_now;
831 
832         if (large_allowed && sk_can_gso(sk)) {
833                 u32 gso_size, hlen;
834 
835                 /* Maybe we should/could use sk->sk_prot->max_header here? */
836                 hlen = inet_csk(sk)->icsk_af_ops->net_header_len +
837                        inet_csk(sk)->icsk_ext_hdr_len +
838                        tp->tcp_header_len;
839 
840                 /* Goal is to send at least one packet per ms,
841                  * not one big TSO packet every 100 ms.
842                  * This preserves ACK clocking and is consistent
843                  * with tcp_tso_should_defer() heuristic.
844                  */
845                 gso_size = sk->sk_pacing_rate / (2 * MSEC_PER_SEC);
846                 gso_size = max_t(u32, gso_size,
847                                  sysctl_tcp_min_tso_segs * mss_now);
848 
849                 xmit_size_goal = min_t(u32, gso_size,
850                                        sk->sk_gso_max_size - 1 - hlen);
851 
852                 xmit_size_goal = tcp_bound_to_half_wnd(tp, xmit_size_goal);
853 
854                 /* We try hard to avoid divides here */
855                 old_size_goal = tp->xmit_size_goal_segs * mss_now;
856 
857                 if (likely(old_size_goal <= xmit_size_goal &&
858                            old_size_goal + mss_now > xmit_size_goal)) {
859                         xmit_size_goal = old_size_goal;
860                 } else {
861                         tp->xmit_size_goal_segs =
862                                 min_t(u16, xmit_size_goal / mss_now,
863                                       sk->sk_gso_max_segs);
864                         xmit_size_goal = tp->xmit_size_goal_segs * mss_now;
865                 }
866         }
867 
868         return max(xmit_size_goal, mss_now);
869 }
870 
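
Worked example (assumed numbers): with sk_pacing_rate at 12.5 MB/s (a 100 Mbit/s flow) and a 1448-byte MSS, gso_size = 12500000 / 2000 = 6250 bytes per half millisecond; the floor of 2 * 1448 = 2896 does not bite, and the goal is rounded down to whole segments, 6250 / 1448 = 4 segments, i.e. a 5792-byte size goal, roughly one such TSO packet every half millisecond instead of one huge burst.
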
871 static int tcp_send_mss(struct sock *sk, int *size_goal, int flags)
872 {
873         int mss_now;
874 
875         mss_now = tcp_current_mss(sk);
876         *size_goal = tcp_xmit_size_goal(sk, mss_now, !(flags & MSG_OOB));
877 
878         return mss_now;
879 }
880 
881 static ssize_t do_tcp_sendpages(struct sock *sk, struct page *page, int offset,
882                                 size_t size, int flags)
883 {
884         struct tcp_sock *tp = tcp_sk(sk);
885         int mss_now, size_goal;
886         int err;
887         ssize_t copied;
888         long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
889 
890         /* Wait for a connection to finish. One exception is TCP Fast Open
891          * (passive side) where data is allowed to be sent before a connection
892          * is fully established.
893          */
894         if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
895             !tcp_passive_fastopen(sk)) {
896                 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
897                         goto out_err;
898         }
899 
900         clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
901 
902         mss_now = tcp_send_mss(sk, &size_goal, flags);
903         copied = 0;
904 
905         err = -EPIPE;
906         if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
907                 goto out_err;
908 
909         while (size > 0) {
910                 struct sk_buff *skb = tcp_write_queue_tail(sk);
911                 int copy, i;
912                 bool can_coalesce;
913 
914                 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
915 new_segment:
916                         if (!sk_stream_memory_free(sk))
917                                 goto wait_for_sndbuf;
918 
919                         skb = sk_stream_alloc_skb(sk, 0, sk->sk_allocation);
920                         if (!skb)
921                                 goto wait_for_memory;
922 
923                         skb_entail(sk, skb);
924                         copy = size_goal;
925                 }
926 
927                 if (copy > size)
928                         copy = size;
929 
930                 i = skb_shinfo(skb)->nr_frags;
931                 can_coalesce = skb_can_coalesce(skb, i, page, offset);
932                 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
933                         tcp_mark_push(tp, skb);
934                         goto new_segment;
935                 }
936                 if (!sk_wmem_schedule(sk, copy))
937                         goto wait_for_memory;
938 
939                 if (can_coalesce) {
940                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
941                 } else {
942                         get_page(page);
943                         skb_fill_page_desc(skb, i, page, offset, copy);
944                 }
945                 skb_shinfo(skb)->tx_flags |= SKBTX_SHARED_FRAG;
946 
947                 skb->len += copy;
948                 skb->data_len += copy;
949                 skb->truesize += copy;
950                 sk->sk_wmem_queued += copy;
951                 sk_mem_charge(sk, copy);
952                 skb->ip_summed = CHECKSUM_PARTIAL;
953                 tp->write_seq += copy;
954                 TCP_SKB_CB(skb)->end_seq += copy;
955                 skb_shinfo(skb)->gso_segs = 0;
956 
957                 if (!copied)
958                         TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
959 
960                 copied += copy;
961                 offset += copy;
962                 if (!(size -= copy))
963                         goto out;
964 
965                 if (skb->len < size_goal || (flags & MSG_OOB))
966                         continue;
967 
968                 if (forced_push(tp)) {
969                         tcp_mark_push(tp, skb);
970                         __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
971                 } else if (skb == tcp_send_head(sk))
972                         tcp_push_one(sk, mss_now);
973                 continue;
974 
975 wait_for_sndbuf:
976                 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
977 wait_for_memory:
978                 tcp_push(sk, flags & ~MSG_MORE, mss_now,
979                          TCP_NAGLE_PUSH, size_goal);
980 
981                 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
982                         goto do_error;
983 
984                 mss_now = tcp_send_mss(sk, &size_goal, flags);
985         }
986 
987 out:
988         if (copied && !(flags & MSG_SENDPAGE_NOTLAST))
989                 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
990         return copied;
991 
992 do_error:
993         if (copied)
994                 goto out;
995 out_err:
996         return sk_stream_error(sk, flags, err);
997 }
998 
999 int tcp_sendpage(struct sock *sk, struct page *page, int offset,
1000                  size_t size, int flags)
1001 {
1002         ssize_t res;
1003 
1004         if (!(sk->sk_route_caps & NETIF_F_SG) ||
1005             !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
1006                 return sock_no_sendpage(sk->sk_socket, page, offset, size,
1007                                         flags);
1008 
1009         lock_sock(sk);
1010         res = do_tcp_sendpages(sk, page, offset, size, flags);
1011         release_sock(sk);
1012         return res;
1013 }
1014 EXPORT_SYMBOL(tcp_sendpage);
1015 
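
One common way into this path from userspace is sendfile(), which can end up in tcp_sendpage() when the route supports SG and checksum offload; a minimal sketch (assumed names, error handling trimmed):

        /* Userspace sketch, not part of tcp.c. */
        #include <sys/sendfile.h>

        static ssize_t send_whole_file(int sock, int file_fd, off_t len)
        {
                off_t off = 0;

                while (off < len) {
                        ssize_t n = sendfile(sock, file_fd, &off, len - off);

                        if (n <= 0)
                                return -1;      /* caller may retry on EAGAIN */
                }
                return off;
        }
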
1016 static inline int select_size(const struct sock *sk, bool sg)
1017 {
1018         const struct tcp_sock *tp = tcp_sk(sk);
1019         int tmp = tp->mss_cache;
1020 
1021         if (sg) {
1022                 if (sk_can_gso(sk)) {
1023                         /* Small frames won't use a full page:
1024                          * Payload will immediately follow the TCP header.
1025                          */
1026                         tmp = SKB_WITH_OVERHEAD(2048 - MAX_TCP_HEADER);
1027                 } else {
1028                         int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
1029 
1030                         if (tmp >= pgbreak &&
1031                             tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
1032                                 tmp = pgbreak;
1033                 }
1034         }
1035 
1036         return tmp;
1037 }
1038 
1039 void tcp_free_fastopen_req(struct tcp_sock *tp)
1040 {
1041         if (tp->fastopen_req != NULL) {
1042                 kfree(tp->fastopen_req);
1043                 tp->fastopen_req = NULL;
1044         }
1045 }
1046 
1047 static int tcp_sendmsg_fastopen(struct sock *sk, struct msghdr *msg,
1048                                 int *copied, size_t size)
1049 {
1050         struct tcp_sock *tp = tcp_sk(sk);
1051         int err, flags;
1052 
1053         if (!(sysctl_tcp_fastopen & TFO_CLIENT_ENABLE))
1054                 return -EOPNOTSUPP;
1055         if (tp->fastopen_req != NULL)
1056                 return -EALREADY; /* Another Fast Open is in progress */
1057 
1058         tp->fastopen_req = kzalloc(sizeof(struct tcp_fastopen_request),
1059                                    sk->sk_allocation);
1060         if (unlikely(tp->fastopen_req == NULL))
1061                 return -ENOBUFS;
1062         tp->fastopen_req->data = msg;
1063         tp->fastopen_req->size = size;
1064 
1065         flags = (msg->msg_flags & MSG_DONTWAIT) ? O_NONBLOCK : 0;
1066         err = __inet_stream_connect(sk->sk_socket, msg->msg_name,
1067                                     msg->msg_namelen, flags);
1068         *copied = tp->fastopen_req->copied;
1069         tcp_free_fastopen_req(tp);
1070         return err;
1071 }
1072 
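
The client half of this dance looks roughly like the sketch below: sendto() with MSG_FASTOPEN stands in for connect() and queues the payload on the SYN, falling back to a normal handshake when no cookie is cached. The helper name is an assumption, and the client bit must be enabled in the tcp_fastopen sysctl:

        /* Userspace sketch, not part of tcp.c. */
        #include <netinet/in.h>
        #include <sys/socket.h>

        #ifndef MSG_FASTOPEN
        #define MSG_FASTOPEN    0x20000000
        #endif

        static ssize_t fastopen_connect_send(int fd,
                                             const struct sockaddr_in *addr,
                                             const void *data, size_t len)
        {
                /* Replaces connect(); returns the bytes queued on the SYN. */
                return sendto(fd, data, len, MSG_FASTOPEN,
                              (const struct sockaddr *)addr, sizeof(*addr));
        }
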
1073 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1074                 size_t size)
1075 {
1076         struct iovec *iov;
1077         struct tcp_sock *tp = tcp_sk(sk);
1078         struct sk_buff *skb;
1079         int iovlen, flags, err, copied = 0;
1080         int mss_now = 0, size_goal, copied_syn = 0, offset = 0;
1081         bool sg;
1082         long timeo;
1083 
1084         lock_sock(sk);
1085 
1086         flags = msg->msg_flags;
1087         if (flags & MSG_FASTOPEN) {
1088                 err = tcp_sendmsg_fastopen(sk, msg, &copied_syn, size);
1089                 if (err == -EINPROGRESS && copied_syn > 0)
1090                         goto out;
1091                 else if (err)
1092                         goto out_err;
1093                 offset = copied_syn;
1094         }
1095 
1096         timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
1097 
1098         /* Wait for a connection to finish. One exception is TCP Fast Open
1099          * (passive side) where data is allowed to be sent before a connection
1100          * is fully established.
1101          */
1102         if (((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT)) &&
1103             !tcp_passive_fastopen(sk)) {
1104                 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
1105                         goto do_error;
1106         }
1107 
1108         if (unlikely(tp->repair)) {
1109                 if (tp->repair_queue == TCP_RECV_QUEUE) {
1110                         copied = tcp_send_rcvq(sk, msg, size);
1111                         goto out;
1112                 }
1113 
1114                 err = -EINVAL;
1115                 if (tp->repair_queue == TCP_NO_QUEUE)
1116                         goto out_err;
1117 
1118                 /* 'common' sending to sendq */
1119         }
1120 
1121         /* This should be in poll */
1122         clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
1123 
1124         mss_now = tcp_send_mss(sk, &size_goal, flags);
1125 
1126         /* Ok commence sending. */
1127         iovlen = msg->msg_iovlen;
1128         iov = msg->msg_iov;
1129         copied = 0;
1130 
1131         err = -EPIPE;
1132         if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
1133                 goto out_err;
1134 
1135         sg = !!(sk->sk_route_caps & NETIF_F_SG);
1136 
1137         while (--iovlen >= 0) {
1138                 size_t seglen = iov->iov_len;
1139                 unsigned char __user *from = iov->iov_base;
1140 
1141                 iov++;
1142                 if (unlikely(offset > 0)) {  /* Skip bytes copied in SYN */
1143                         if (offset >= seglen) {
1144                                 offset -= seglen;
1145                                 continue;
1146                         }
1147                         seglen -= offset;
1148                         from += offset;
1149                         offset = 0;
1150                 }
1151 
1152                 while (seglen > 0) {
1153                         int copy = 0;
1154                         int max = size_goal;
1155 
1156                         skb = tcp_write_queue_tail(sk);
1157                         if (tcp_send_head(sk)) {
1158                                 if (skb->ip_summed == CHECKSUM_NONE)
1159                                         max = mss_now;
1160                                 copy = max - skb->len;
1161                         }
1162 
1163                         if (copy <= 0) {
1164 new_segment:
1165                                 /* Allocate new segment. If the interface is SG,
1166                                  * allocate an skb fitting into a single page.
1167                                  */
1168                                 if (!sk_stream_memory_free(sk))
1169                                         goto wait_for_sndbuf;
1170 
1171                                 skb = sk_stream_alloc_skb(sk,
1172                                                           select_size(sk, sg),
1173                                                           sk->sk_allocation);
1174                                 if (!skb)
1175                                         goto wait_for_memory;
1176 
1177                                 /*
1178                                  * All packets are restored as if they have
1179                                  * already been sent.
1180                                  */
1181                                 if (tp->repair)
1182                                         TCP_SKB_CB(skb)->when = tcp_time_stamp;
1183 
1184                                 /*
1185                                  * Check whether we can use HW checksum.
1186                                  */
1187                                 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
1188                                         skb->ip_summed = CHECKSUM_PARTIAL;
1189 
1190                                 skb_entail(sk, skb);
1191                                 copy = size_goal;
1192                                 max = size_goal;
1193                         }
1194 
1195                         /* Try to append data to the end of skb. */
1196                         if (copy > seglen)
1197                                 copy = seglen;
1198 
1199                         /* Where to copy to? */
1200                         if (skb_availroom(skb) > 0) {
1201                                 /* We have some space in skb head. Superb! */
1202                                 copy = min_t(int, copy, skb_availroom(skb));
1203                                 err = skb_add_data_nocache(sk, skb, from, copy);
1204                                 if (err)
1205                                         goto do_fault;
1206                         } else {
1207                                 bool merge = true;
1208                                 int i = skb_shinfo(skb)->nr_frags;
1209                                 struct page_frag *pfrag = sk_page_frag(sk);
1210 
1211                                 if (!sk_page_frag_refill(sk, pfrag))
1212                                         goto wait_for_memory;
1213 
1214                                 if (!skb_can_coalesce(skb, i, pfrag->page,
1215                                                       pfrag->offset)) {
1216                                         if (i == MAX_SKB_FRAGS || !sg) {
1217                                                 tcp_mark_push(tp, skb);
1218                                                 goto new_segment;
1219                                         }
1220                                         merge = false;
1221                                 }
1222 
1223                                 copy = min_t(int, copy, pfrag->size - pfrag->offset);
1224 
1225                                 if (!sk_wmem_schedule(sk, copy))
1226                                         goto wait_for_memory;
1227 
1228                                 err = skb_copy_to_page_nocache(sk, from, skb,
1229                                                                pfrag->page,
1230                                                                pfrag->offset,
1231                                                                copy);
1232                                 if (err)
1233                                         goto do_error;
1234 
1235                                 /* Update the skb. */
1236                                 if (merge) {
1237                                         skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
1238                                 } else {
1239                                         skb_fill_page_desc(skb, i, pfrag->page,
1240                                                            pfrag->offset, copy);
1241                                         get_page(pfrag->page);
1242                                 }
1243                                 pfrag->offset += copy;
1244                         }
1245 
1246                         if (!copied)
1247                                 TCP_SKB_CB(skb)->tcp_flags &= ~TCPHDR_PSH;
1248 
1249                         tp->write_seq += copy;
1250                         TCP_SKB_CB(skb)->end_seq += copy;
1251                         skb_shinfo(skb)->gso_segs = 0;
1252 
1253                         from += copy;
1254                         copied += copy;
1255                         if ((seglen -= copy) == 0 && iovlen == 0)
1256                                 goto out;
1257 
1258                         if (skb->len < max || (flags & MSG_OOB) || unlikely(tp->repair))
1259                                 continue;
1260 
1261                         if (forced_push(tp)) {
1262                                 tcp_mark_push(tp, skb);
1263                                 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
1264                         } else if (skb == tcp_send_head(sk))
1265                                 tcp_push_one(sk, mss_now);
1266                         continue;
1267 
1268 wait_for_sndbuf:
1269                         set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
1270 wait_for_memory:
1271                         if (copied)
1272                                 tcp_push(sk, flags & ~MSG_MORE, mss_now,
1273                                          TCP_NAGLE_PUSH, size_goal);
1274 
1275                         if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
1276                                 goto do_error;
1277 
1278                         mss_now = tcp_send_mss(sk, &size_goal, flags);
1279                 }
1280         }
1281 
1282 out:
1283         if (copied)
1284                 tcp_push(sk, flags, mss_now, tp->nonagle, size_goal);
1285         release_sock(sk);
1286         return copied + copied_syn;
1287 
1288 do_fault:
1289         if (!skb->len) {
1290                 tcp_unlink_write_queue(skb, sk);
1291                 /* It is the one place in all of TCP, except connection
1292                  * reset, where we can be unlinking the send_head.
1293                  */
1294                 tcp_check_send_head(sk, skb);
1295                 sk_wmem_free_skb(sk, skb);
1296         }
1297 
1298 do_error:
1299         if (copied + copied_syn)
1300                 goto out;
1301 out_err:
1302         err = sk_stream_error(sk, flags, err);
1303         release_sock(sk);
1304         return err;
1305 }
1306 EXPORT_SYMBOL(tcp_sendmsg);
1307 
1308 /*
1309  *      Handle reading urgent data. BSD has very simple semantics for
1310  *      this, no blocking and very strange errors 8)
1311  */
1312 
1313 static int tcp_recv_urg(struct sock *sk, struct msghdr *msg, int len, int flags)
1314 {
1315         struct tcp_sock *tp = tcp_sk(sk);
1316 
1317         /* No URG data to read. */
1318         if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1319             tp->urg_data == TCP_URG_READ)
1320                 return -EINVAL; /* Yes, this is right! */
1321 
1322         if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1323                 return -ENOTCONN;
1324 
1325         if (tp->urg_data & TCP_URG_VALID) {
1326                 int err = 0;
1327                 char c = tp->urg_data;
1328 
1329                 if (!(flags & MSG_PEEK))
1330                         tp->urg_data = TCP_URG_READ;
1331 
1332                 /* Read urgent data. */
1333                 msg->msg_flags |= MSG_OOB;
1334 
1335                 if (len > 0) {
1336                         if (!(flags & MSG_TRUNC))
1337                                 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1338                         len = 1;
1339                 } else
1340                         msg->msg_flags |= MSG_TRUNC;
1341 
1342                 return err ? -EFAULT : len;
1343         }
1344 
1345         if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1346                 return 0;
1347 
1348         /* Fixed the recv(..., MSG_OOB) behaviour.  BSD docs and
1349          * the available implementations agree in this case:
1350          * this call should never block, independent of the
1351          * blocking state of the socket.
1352          * Mike <pall@rz.uni-karlsruhe.de>
1353          */
1354         return -EAGAIN;
1355 }
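
/*
 * Editor's note: a minimal userspace sketch (not part of this file) of
 * the MSG_OOB semantics implemented above.  "fd" is assumed to be a
 * connected TCP socket without SO_OOBINLINE set; error handling is
 * abbreviated.
 */
#if 0
#include <errno.h>
#include <sys/socket.h>

static int read_oob_byte(int fd, char *c)
{
        ssize_t n = recv(fd, c, 1, MSG_OOB);

        if (n == 1)
                return 0;               /* got the urgent byte */
        if (n < 0 && errno == EAGAIN)
                return -1;              /* none pending; never blocks */
        return -2;                      /* e.g. EINVAL: already read */
}
#endif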
1356 
1357 static int tcp_peek_sndq(struct sock *sk, struct msghdr *msg, int len)
1358 {
1359         struct sk_buff *skb;
1360         int copied = 0, err = 0;
1361 
1362         /* XXX -- need to support SO_PEEK_OFF */
1363 
1364         skb_queue_walk(&sk->sk_write_queue, skb) {
1365                 err = skb_copy_datagram_iovec(skb, 0, msg->msg_iov, skb->len);
1366                 if (err)
1367                         break;
1368 
1369                 copied += skb->len;
1370         }
1371 
1372         return err ?: copied;
1373 }
1374 
1375 /* Clean up the receive buffer for full frames taken by the user,
1376  * then send an ACK if necessary.  COPIED is the number of bytes
1377  * tcp_recvmsg has given to the user so far; it speeds up the
1378  * calculation of whether or not we must ACK for the sake of
1379  * a window update.
1380  */
1381 void tcp_cleanup_rbuf(struct sock *sk, int copied)
1382 {
1383         struct tcp_sock *tp = tcp_sk(sk);
1384         bool time_to_ack = false;
1385 
1386         struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1387 
1388         WARN(skb && !before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq),
1389              "cleanup rbuf bug: copied %X seq %X rcvnxt %X\n",
1390              tp->copied_seq, TCP_SKB_CB(skb)->end_seq, tp->rcv_nxt);
1391 
1392         if (inet_csk_ack_scheduled(sk)) {
1393                 const struct inet_connection_sock *icsk = inet_csk(sk);
1394                 /* Delayed ACKs frequently hit locked sockets during bulk
1395                  * receive. */
1396                 if (icsk->icsk_ack.blocked ||
1397                     /* Once-per-two-segments ACK was not sent by tcp_input.c */
1398                     tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
1399                     /*
1400                      * If this read emptied the read buffer, send an ACK when
1401                      * the connection is not bidirectional: the user drained
1402                      * the receive buffer and there was a small segment
1403                      * in the queue.
1404                      */
1405                     (copied > 0 &&
1406                      ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
1407                       ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
1408                        !icsk->icsk_ack.pingpong)) &&
1409                       !atomic_read(&sk->sk_rmem_alloc)))
1410                         time_to_ack = true;
1411         }
1412 
1413         /* We send an ACK if we can now advertise a non-zero window
1414          * which has been raised "significantly".
1415          *
1416          * Even if the window was raised to infinity, do not send a window-
1417          * opening ACK in states where we will not receive more data; it is useless.
1418          */
1419         if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1420                 __u32 rcv_window_now = tcp_receive_window(tp);
1421 
1422                 /* Optimize, __tcp_select_window() is not cheap. */
1423                 if (2*rcv_window_now <= tp->window_clamp) {
1424                         __u32 new_window = __tcp_select_window(sk);
1425 
1426                         /* Send an ACK now if this read freed lots of space
1427                          * in our buffer. We can advertise the new window now
1428                          * if it is not less than the current one.
1429                          * "Lots" means "at least twice" here.
1430                          */
1431                         if (new_window && new_window >= 2 * rcv_window_now)
1432                                 time_to_ack = true;
1433                 }
1434         }
1435         if (time_to_ack)
1436                 tcp_send_ack(sk);
1437 }
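
/*
 * Editor's note, a worked example of the window-update check above:
 * with tp->window_clamp = 64K and a currently advertised window of
 * 16K, 2 * 16K <= 64K holds, so __tcp_select_window() is consulted;
 * if it now returns 48K, then 48K >= 2 * 16K and time_to_ack becomes
 * true: the read freed "lots" (at least twice the old window) of space.
 */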
1438 
1439 static void tcp_prequeue_process(struct sock *sk)
1440 {
1441         struct sk_buff *skb;
1442         struct tcp_sock *tp = tcp_sk(sk);
1443 
1444         NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPPREQUEUED);
1445 
1446         /* The RX process wants to run with BHs disabled, though it is
1447          * not strictly necessary. */
1448         local_bh_disable();
1449         while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1450                 sk_backlog_rcv(sk, skb);
1451         local_bh_enable();
1452 
1453         /* Clear memory counter. */
1454         tp->ucopy.memory = 0;
1455 }
1456 
1457 #ifdef CONFIG_NET_DMA
1458 static void tcp_service_net_dma(struct sock *sk, bool wait)
1459 {
1460         dma_cookie_t done, used;
1461         dma_cookie_t last_issued;
1462         struct tcp_sock *tp = tcp_sk(sk);
1463 
1464         if (!tp->ucopy.dma_chan)
1465                 return;
1466 
1467         last_issued = tp->ucopy.dma_cookie;
1468         dma_async_issue_pending(tp->ucopy.dma_chan);
1469 
1470         do {
1471                 if (dma_async_is_tx_complete(tp->ucopy.dma_chan,
1472                                               last_issued, &done,
1473                                               &used) == DMA_COMPLETE) {
1474                         /* Safe to free early-copied skbs now */
1475                         __skb_queue_purge(&sk->sk_async_wait_queue);
1476                         break;
1477                 } else {
1478                         struct sk_buff *skb;
1479                         while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1480                                (dma_async_is_complete(skb->dma_cookie, done,
1481                                                       used) == DMA_COMPLETE)) {
1482                                 __skb_dequeue(&sk->sk_async_wait_queue);
1483                                 kfree_skb(skb);
1484                         }
1485                 }
1486         } while (wait);
1487 }
1488 #endif
1489 
1490 static struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1491 {
1492         struct sk_buff *skb;
1493         u32 offset;
1494 
1495         while ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) {
1496                 offset = seq - TCP_SKB_CB(skb)->seq;
1497                 if (tcp_hdr(skb)->syn)
1498                         offset--;
1499                 if (offset < skb->len || tcp_hdr(skb)->fin) {
1500                         *off = offset;
1501                         return skb;
1502                 }
1503                 /* This looks weird, but it can happen if TCP collapsing
1504                  * split a fat GRO packet while we released the socket lock
1505                  * in skb_splice_bits().
1506                  */
1507                 sk_eat_skb(sk, skb, false);
1508         }
1509         return NULL;
1510 }
1511 
1512 /*
1513  * This routine provides an alternative to tcp_recvmsg() for routines
1514  * that would like to handle copying from skbuffs directly in 'sendfile'
1515  * fashion.
1516  * Note:
1517  *      - It is assumed that the socket was locked by the caller.
1518  *      - The routine does not block.
1519  *      - At present, there is no support for reading OOB data
1520  *        or for 'peeking' the socket using this routine
1521  *        (although both would be easy to implement).
1522  */
1523 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1524                   sk_read_actor_t recv_actor)
1525 {
1526         struct sk_buff *skb;
1527         struct tcp_sock *tp = tcp_sk(sk);
1528         u32 seq = tp->copied_seq;
1529         u32 offset;
1530         int copied = 0;
1531 
1532         if (sk->sk_state == TCP_LISTEN)
1533                 return -ENOTCONN;
1534         while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1535                 if (offset < skb->len) {
1536                         int used;
1537                         size_t len;
1538 
1539                         len = skb->len - offset;
1540                         /* Stop reading if we hit a patch of urgent data */
1541                         if (tp->urg_data) {
1542                                 u32 urg_offset = tp->urg_seq - seq;
1543                                 if (urg_offset < len)
1544                                         len = urg_offset;
1545                                 if (!len)
1546                                         break;
1547                         }
1548                         used = recv_actor(desc, skb, offset, len);
1549                         if (used <= 0) {
1550                                 if (!copied)
1551                                         copied = used;
1552                                 break;
1553                         } else if (used <= len) {
1554                                 seq += used;
1555                                 copied += used;
1556                                 offset += used;
1557                         }
1558                         /* If recv_actor drops the lock (e.g. TCP splice
1559                          * receive) the skb pointer might be invalid when
1560                          * getting here: tcp_collapse might have deleted it
1561                          * while aggregating skbs from the socket queue.
1562                          */
1563                         skb = tcp_recv_skb(sk, seq - 1, &offset);
1564                         if (!skb)
1565                                 break;
1566                         /* TCP coalescing might have appended data to the skb.
1567                          * Try to splice more frags
1568                          */
1569                         if (offset + 1 != skb->len)
1570                                 continue;
1571                 }
1572                 if (tcp_hdr(skb)->fin) {
1573                         sk_eat_skb(sk, skb, false);
1574                         ++seq;
1575                         break;
1576                 }
1577                 sk_eat_skb(sk, skb, false);
1578                 if (!desc->count)
1579                         break;
1580                 tp->copied_seq = seq;
1581         }
1582         tp->copied_seq = seq;
1583 
1584         tcp_rcv_space_adjust(sk);
1585 
1586         /* Clean up data we have read: This will do ACK frames. */
1587         if (copied > 0) {
1588                 tcp_recv_skb(sk, seq, &offset);
1589                 tcp_cleanup_rbuf(sk, copied);
1590         }
1591         return copied;
1592 }
1593 EXPORT_SYMBOL(tcp_read_sock);
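
/*
 * Editor's sketch (not part of this file): a minimal sk_read_actor_t
 * for tcp_read_sock().  "struct demo_sink" and demo_recv_actor are
 * hypothetical names; the caller fills desc->arg.data and desc->count
 * and must hold the socket lock, as noted above.
 */
#if 0
struct demo_sink {
        char    *buf;
        size_t  size, used;
};

static int demo_recv_actor(read_descriptor_t *desc, struct sk_buff *skb,
                           unsigned int offset, size_t len)
{
        struct demo_sink *s = desc->arg.data;
        size_t n = min_t(size_t, len, s->size - s->used);

        if (skb_copy_bits(skb, offset, s->buf + s->used, n))
                return -EFAULT;
        s->used += n;
        if (s->used == s->size)
                desc->count = 0;        /* ask tcp_read_sock() to stop */
        return n;
}
#endif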
1594 
1595 /*
1596  *      This routine copies from a sock struct into the user buffer.
1597  *
1598  *      Technical note: in 2.3 we work on a _locked_ socket, so that
1599  *      tricks with *seq access order and skb->users are not required.
1600  *      The code can probably be improved even more.
1601  */
1602 
1603 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1604                 size_t len, int nonblock, int flags, int *addr_len)
1605 {
1606         struct tcp_sock *tp = tcp_sk(sk);
1607         int copied = 0;
1608         u32 peek_seq;
1609         u32 *seq;
1610         unsigned long used;
1611         int err;
1612         int target;             /* Read at least this many bytes */
1613         long timeo;
1614         struct task_struct *user_recv = NULL;
1615         bool copied_early = false;
1616         struct sk_buff *skb;
1617         u32 urg_hole = 0;
1618 
1619         if (sk_can_busy_loop(sk) && skb_queue_empty(&sk->sk_receive_queue) &&
1620             (sk->sk_state == TCP_ESTABLISHED))
1621                 sk_busy_loop(sk, nonblock);
1622 
1623         lock_sock(sk);
1624 
1625         err = -ENOTCONN;
1626         if (sk->sk_state == TCP_LISTEN)
1627                 goto out;
1628 
1629         timeo = sock_rcvtimeo(sk, nonblock);
1630 
1631         /* Urgent data needs to be handled specially. */
1632         if (flags & MSG_OOB)
1633                 goto recv_urg;
1634 
1635         if (unlikely(tp->repair)) {
1636                 err = -EPERM;
1637                 if (!(flags & MSG_PEEK))
1638                         goto out;
1639 
1640                 if (tp->repair_queue == TCP_SEND_QUEUE)
1641                         goto recv_sndq;
1642 
1643                 err = -EINVAL;
1644                 if (tp->repair_queue == TCP_NO_QUEUE)
1645                         goto out;
1646 
1647                 /* 'common' recv queue MSG_PEEK-ing */
1648         }
1649 
1650         seq = &tp->copied_seq;
1651         if (flags & MSG_PEEK) {
1652                 peek_seq = tp->copied_seq;
1653                 seq = &peek_seq;
1654         }
1655 
1656         target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1657 
1658 #ifdef CONFIG_NET_DMA
1659         tp->ucopy.dma_chan = NULL;
1660         preempt_disable();
1661         skb = skb_peek_tail(&sk->sk_receive_queue);
1662         {
1663                 int available = 0;
1664 
1665                 if (skb)
1666                         available = TCP_SKB_CB(skb)->seq + skb->len - (*seq);
1667                 if ((available < target) &&
1668                     (len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1669                     !sysctl_tcp_low_latency &&
1670                     net_dma_find_channel()) {
1671                         preempt_enable();
1672                         tp->ucopy.pinned_list =
1673                                         dma_pin_iovec_pages(msg->msg_iov, len);
1674                 } else {
1675                         preempt_enable();
1676                 }
1677         }
1678 #endif
1679 
1680         do {
1681                 u32 offset;
1682 
1683                 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1684                 if (tp->urg_data && tp->urg_seq == *seq) {
1685                         if (copied)
1686                                 break;
1687                         if (signal_pending(current)) {
1688                                 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1689                                 break;
1690                         }
1691                 }
1692 
1693                 /* Next get a buffer. */
1694 
1695                 skb_queue_walk(&sk->sk_receive_queue, skb) {
1696                         /* Now that we have two receive queues this
1697                          * shouldn't happen.
1698                          */
1699                         if (WARN(before(*seq, TCP_SKB_CB(skb)->seq),
1700                                  "recvmsg bug: copied %X seq %X rcvnxt %X fl %X\n",
1701                                  *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt,
1702                                  flags))
1703                                 break;
1704 
1705                         offset = *seq - TCP_SKB_CB(skb)->seq;
1706                         if (tcp_hdr(skb)->syn)
1707                                 offset--;
1708                         if (offset < skb->len)
1709                                 goto found_ok_skb;
1710                         if (tcp_hdr(skb)->fin)
1711                                 goto found_fin_ok;
1712                         WARN(!(flags & MSG_PEEK),
1713                              "recvmsg bug 2: copied %X seq %X rcvnxt %X fl %X\n",
1714                              *seq, TCP_SKB_CB(skb)->seq, tp->rcv_nxt, flags);
1715                 }
1716 
1717                 /* Well, if we have backlog, try to process it now. */
1718 
1719                 if (copied >= target && !sk->sk_backlog.tail)
1720                         break;
1721 
1722                 if (copied) {
1723                         if (sk->sk_err ||
1724                             sk->sk_state == TCP_CLOSE ||
1725                             (sk->sk_shutdown & RCV_SHUTDOWN) ||
1726                             !timeo ||
1727                             signal_pending(current))
1728                                 break;
1729                 } else {
1730                         if (sock_flag(sk, SOCK_DONE))
1731                                 break;
1732 
1733                         if (sk->sk_err) {
1734                                 copied = sock_error(sk);
1735                                 break;
1736                         }
1737 
1738                         if (sk->sk_shutdown & RCV_SHUTDOWN)
1739                                 break;
1740 
1741                         if (sk->sk_state == TCP_CLOSE) {
1742                                 if (!sock_flag(sk, SOCK_DONE)) {
1743                                         /* This occurs when user tries to read
1744                                          * from a never-connected socket.
1745                                          */
1746                                         copied = -ENOTCONN;
1747                                         break;
1748                                 }
1749                                 break;
1750                         }
1751 
1752                         if (!timeo) {
1753                                 copied = -EAGAIN;
1754                                 break;
1755                         }
1756 
1757                         if (signal_pending(current)) {
1758                                 copied = sock_intr_errno(timeo);
1759                                 break;
1760                         }
1761                 }
1762 
1763                 tcp_cleanup_rbuf(sk, copied);
1764 
1765                 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1766                         /* Install new reader */
1767                         if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1768                                 user_recv = current;
1769                                 tp->ucopy.task = user_recv;
1770                                 tp->ucopy.iov = msg->msg_iov;
1771                         }
1772 
1773                         tp->ucopy.len = len;
1774 
1775                         WARN_ON(tp->copied_seq != tp->rcv_nxt &&
1776                                 !(flags & (MSG_PEEK | MSG_TRUNC)));
1777 
1778                         /* Ugly... If the prequeue is not empty, we have to
1779                          * process it before releasing the socket; otherwise
1780                          * the order will be broken at the second iteration.
1781                          * A more elegant solution is required!!!
1782                          *
1783                          * Look: we have the following (pseudo)queues:
1784                          *
1785                          * 1. packets in flight
1786                          * 2. backlog
1787                          * 3. prequeue
1788                          * 4. receive_queue
1789                          *
1790                          * Each queue can be processed only if the next ones
1791                          * are empty. At this point the receive_queue is empty,
1792                          * but the prequeue _can_ be non-empty after the 2nd
1793                          * iteration, when we jumped to the start of the loop
1794                          * because backlog processing added something to the
1795                          * receive_queue. We cannot release_sock(), because the
1796                          * backlog contains packets that arrived _after_ the prequeued ones.
1797                          *
1798                          * In short, the algorithm is clear: process all the
1799                          * queues in order. We could do this more directly by
1800                          * requeueing packets from the backlog to the prequeue
1801                          * when it is not empty; that is more elegant, but eats
1802                          * cycles, unfortunately.
1803                          */
1804                         if (!skb_queue_empty(&tp->ucopy.prequeue))
1805                                 goto do_prequeue;
1806 
1807                         /* __ Set realtime policy in scheduler __ */
1808                 }
1809 
1810 #ifdef CONFIG_NET_DMA
1811                 if (tp->ucopy.dma_chan) {
1812                         if (tp->rcv_wnd == 0 &&
1813                             !skb_queue_empty(&sk->sk_async_wait_queue)) {
1814                                 tcp_service_net_dma(sk, true);
1815                                 tcp_cleanup_rbuf(sk, copied);
1816                         } else
1817                                 dma_async_issue_pending(tp->ucopy.dma_chan);
1818                 }
1819 #endif
1820                 if (copied >= target) {
1821                         /* Do not sleep, just process backlog. */
1822                         release_sock(sk);
1823                         lock_sock(sk);
1824                 } else
1825                         sk_wait_data(sk, &timeo);
1826 
1827 #ifdef CONFIG_NET_DMA
1828                 tcp_service_net_dma(sk, false);  /* Don't block */
1829                 tp->ucopy.wakeup = 0;
1830 #endif
1831 
1832                 if (user_recv) {
1833                         int chunk;
1834 
1835                         /* __ Restore normal policy in scheduler __ */
1836 
1837                         if ((chunk = len - tp->ucopy.len) != 0) {
1838                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1839                                 len -= chunk;
1840                                 copied += chunk;
1841                         }
1842 
1843                         if (tp->rcv_nxt == tp->copied_seq &&
1844                             !skb_queue_empty(&tp->ucopy.prequeue)) {
1845 do_prequeue:
1846                                 tcp_prequeue_process(sk);
1847 
1848                                 if ((chunk = len - tp->ucopy.len) != 0) {
1849                                         NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1850                                         len -= chunk;
1851                                         copied += chunk;
1852                                 }
1853                         }
1854                 }
1855                 if ((flags & MSG_PEEK) &&
1856                     (peek_seq - copied - urg_hole != tp->copied_seq)) {
1857                         net_dbg_ratelimited("TCP(%s:%d): Application bug, race in MSG_PEEK\n",
1858                                             current->comm,
1859                                             task_pid_nr(current));
1860                         peek_seq = tp->copied_seq;
1861                 }
1862                 continue;
1863 
1864         found_ok_skb:
1865                 /* Ok so how much can we use? */
1866                 used = skb->len - offset;
1867                 if (len < used)
1868                         used = len;
1869 
1870                 /* Do we have urgent data here? */
1871                 if (tp->urg_data) {
1872                         u32 urg_offset = tp->urg_seq - *seq;
1873                         if (urg_offset < used) {
1874                                 if (!urg_offset) {
1875                                         if (!sock_flag(sk, SOCK_URGINLINE)) {
1876                                                 ++*seq;
1877                                                 urg_hole++;
1878                                                 offset++;
1879                                                 used--;
1880                                                 if (!used)
1881                                                         goto skip_copy;
1882                                         }
1883                                 } else
1884                                         used = urg_offset;
1885                         }
1886                 }
1887 
1888                 if (!(flags & MSG_TRUNC)) {
1889 #ifdef CONFIG_NET_DMA
1890                         if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1891                                 tp->ucopy.dma_chan = net_dma_find_channel();
1892 
1893                         if (tp->ucopy.dma_chan) {
1894                                 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1895                                         tp->ucopy.dma_chan, skb, offset,
1896                                         msg->msg_iov, used,
1897                                         tp->ucopy.pinned_list);
1898 
1899                                 if (tp->ucopy.dma_cookie < 0) {
1900 
1901                                         pr_alert("%s: dma_cookie < 0\n",
1902                                                  __func__);
1903 
1904                                         /* Exception. Bailout! */
1905                                         if (!copied)
1906                                                 copied = -EFAULT;
1907                                         break;
1908                                 }
1909 
1910                                 dma_async_issue_pending(tp->ucopy.dma_chan);
1911 
1912                                 if ((offset + used) == skb->len)
1913                                         copied_early = true;
1914 
1915                         } else
1916 #endif
1917                         {
1918                                 err = skb_copy_datagram_iovec(skb, offset,
1919                                                 msg->msg_iov, used);
1920                                 if (err) {
1921                                         /* Exception. Bailout! */
1922                                         if (!copied)
1923                                                 copied = -EFAULT;
1924                                         break;
1925                                 }
1926                         }
1927                 }
1928 
1929                 *seq += used;
1930                 copied += used;
1931                 len -= used;
1932 
1933                 tcp_rcv_space_adjust(sk);
1934 
1935 skip_copy:
1936                 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1937                         tp->urg_data = 0;
1938                         tcp_fast_path_check(sk);
1939                 }
1940                 if (used + offset < skb->len)
1941                         continue;
1942 
1943                 if (tcp_hdr(skb)->fin)
1944                         goto found_fin_ok;
1945                 if (!(flags & MSG_PEEK)) {
1946                         sk_eat_skb(sk, skb, copied_early);
1947                         copied_early = false;
1948                 }
1949                 continue;
1950 
1951         found_fin_ok:
1952                 /* Process the FIN. */
1953                 ++*seq;
1954                 if (!(flags & MSG_PEEK)) {
1955                         sk_eat_skb(sk, skb, copied_early);
1956                         copied_early = false;
1957                 }
1958                 break;
1959         } while (len > 0);
1960 
1961         if (user_recv) {
1962                 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1963                         int chunk;
1964 
1965                         tp->ucopy.len = copied > 0 ? len : 0;
1966 
1967                         tcp_prequeue_process(sk);
1968 
1969                         if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1970                                 NET_ADD_STATS_USER(sock_net(sk), LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1971                                 len -= chunk;
1972                                 copied += chunk;
1973                         }
1974                 }
1975 
1976                 tp->ucopy.task = NULL;
1977                 tp->ucopy.len = 0;
1978         }
1979 
1980 #ifdef CONFIG_NET_DMA
1981         tcp_service_net_dma(sk, true);  /* Wait for queue to drain */
1982         tp->ucopy.dma_chan = NULL;
1983 
1984         if (tp->ucopy.pinned_list) {
1985                 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1986                 tp->ucopy.pinned_list = NULL;
1987         }
1988 #endif
1989 
1990         /* According to UNIX98, msg_name/msg_namelen are ignored
1991          * on a connected socket. I was just happy when I found this 8) --ANK
1992          */
1993 
1994         /* Clean up data we have read: This will do ACK frames. */
1995         tcp_cleanup_rbuf(sk, copied);
1996 
1997         release_sock(sk);
1998         return copied;
1999 
2000 out:
2001         release_sock(sk);
2002         return err;
2003 
2004 recv_urg:
2005         err = tcp_recv_urg(sk, msg, len, flags);
2006         goto out;
2007 
2008 recv_sndq:
2009         err = tcp_peek_sndq(sk, msg, len);
2010         goto out;
2011 }
2012 EXPORT_SYMBOL(tcp_recvmsg);
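
/*
 * Editor's sketch (not part of this file): how the "target" used above
 * is raised from userspace.  With SO_RCVLOWAT (or MSG_WAITALL), a
 * blocking recv() does not return until at least that much data has
 * been queued; error handling is abbreviated.
 */
#if 0
#include <sys/socket.h>

static ssize_t recv_at_least(int fd, void *buf, size_t len, int target)
{
        if (setsockopt(fd, SOL_SOCKET, SO_RCVLOWAT,
                       &target, sizeof(target)) < 0)
                return -1;
        return recv(fd, buf, len, 0);   /* waits for >= target bytes */
}
#endif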
2013 
2014 void tcp_set_state(struct sock *sk, int state)
2015 {
2016         int oldstate = sk->sk_state;
2017 
2018         switch (state) {
2019         case TCP_ESTABLISHED:
2020                 if (oldstate != TCP_ESTABLISHED)
2021                         TCP_INC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2022                 break;
2023 
2024         case TCP_CLOSE:
2025                 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
2026                         TCP_INC_STATS(sock_net(sk), TCP_MIB_ESTABRESETS);
2027 
2028                 sk->sk_prot->unhash(sk);
2029                 if (inet_csk(sk)->icsk_bind_hash &&
2030                     !(sk->sk_userlocks & SOCK_BINDPORT_LOCK))
2031                         inet_put_port(sk);
2032                 /* fall through */
2033         default:
2034                 if (oldstate == TCP_ESTABLISHED)
2035                         TCP_DEC_STATS(sock_net(sk), TCP_MIB_CURRESTAB);
2036         }
2037 
2038         /* Change state AFTER socket is unhashed to avoid closed
2039          * socket sitting in hash tables.
2040          */
2041         sk->sk_state = state;
2042 
2043 #ifdef STATE_TRACE
2044         SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n", sk, statename[oldstate], statename[state]);
2045 #endif
2046 }
2047 EXPORT_SYMBOL_GPL(tcp_set_state);
2048 
2049 /*
2050  *      State processing on a close. This implements the state shift for
2051  *      sending our FIN frame. Note that we only send a FIN for some
2052  *      states. A shutdown() may have already sent the FIN, or we may be
2053  *      closed.
2054  */
2055 
2056 static const unsigned char new_state[16] = {
2057   /* current state:        new state:      action:      */
2058   /* (Invalid)          */ TCP_CLOSE,
2059   /* TCP_ESTABLISHED    */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2060   /* TCP_SYN_SENT       */ TCP_CLOSE,
2061   /* TCP_SYN_RECV       */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
2062   /* TCP_FIN_WAIT1      */ TCP_FIN_WAIT1,
2063   /* TCP_FIN_WAIT2      */ TCP_FIN_WAIT2,
2064   /* TCP_TIME_WAIT      */ TCP_CLOSE,
2065   /* TCP_CLOSE          */ TCP_CLOSE,
2066   /* TCP_CLOSE_WAIT     */ TCP_LAST_ACK  | TCP_ACTION_FIN,
2067   /* TCP_LAST_ACK       */ TCP_LAST_ACK,
2068   /* TCP_LISTEN         */ TCP_CLOSE,
2069   /* TCP_CLOSING        */ TCP_CLOSING,
2070 };
2071 
2072 static int tcp_close_state(struct sock *sk)
2073 {
2074         int next = (int)new_state[sk->sk_state];
2075         int ns = next & TCP_STATE_MASK;
2076 
2077         tcp_set_state(sk, ns);
2078 
2079         return next & TCP_ACTION_FIN;
2080 }
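
/*
 * Editor's note, a worked example of the table above: closing a socket
 * in TCP_CLOSE_WAIT looks up new_state[TCP_CLOSE_WAIT], which is
 * TCP_LAST_ACK | TCP_ACTION_FIN, so tcp_close_state() moves the socket
 * to LAST-ACK and returns non-zero, telling the caller to send a FIN.
 * From TCP_SYN_SENT the entry is plain TCP_CLOSE: the connection is
 * dropped without sending a FIN.
 */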
2081 
2082 /*
2083  *      Shutdown the sending side of a connection. Much like close except
2084  *      that we don't shut down the receive side or sock_set_flag(sk, SOCK_DEAD).
2085  */
2086 
2087 void tcp_shutdown(struct sock *sk, int how)
2088 {
2089         /*      We need to grab some memory, and put together a FIN,
2090          *      and then put it into the queue to be sent.
2091          *              Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
2092          */
2093         if (!(how & SEND_SHUTDOWN))
2094                 return;
2095 
2096         /* If we've already sent a FIN, or it's a closed state, skip this. */
2097         if ((1 << sk->sk_state) &
2098             (TCPF_ESTABLISHED | TCPF_SYN_SENT |
2099              TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
2100                 /* Clear out any half completed packets.  FIN if needed. */
2101                 if (tcp_close_state(sk))
2102                         tcp_send_fin(sk);
2103         }
2104 }
2105 EXPORT_SYMBOL(tcp_shutdown);
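
/*
 * Editor's sketch (not part of this file): the userspace half-close
 * that reaches tcp_shutdown() above; SHUT_WR maps to SEND_SHUTDOWN.
 */
#if 0
#include <sys/socket.h>

static void half_close(int fd)
{
        shutdown(fd, SHUT_WR);  /* queue a FIN; receiving still works */
}
#endif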
2106 
2107 bool tcp_check_oom(struct sock *sk, int shift)
2108 {
2109         bool too_many_orphans, out_of_socket_memory;
2110 
2111         too_many_orphans = tcp_too_many_orphans(sk, shift);
2112         out_of_socket_memory = tcp_out_of_memory(sk);
2113 
2114         if (too_many_orphans)
2115                 net_info_ratelimited("too many orphaned sockets\n");
2116         if (out_of_socket_memory)
2117                 net_info_ratelimited("out of memory -- consider tuning tcp_mem\n");
2118         return too_many_orphans || out_of_socket_memory;
2119 }
2120 
2121 void tcp_close(struct sock *sk, long timeout)
2122 {
2123         struct sk_buff *skb;
2124         int data_was_unread = 0;
2125         int state;
2126 
2127         lock_sock(sk);
2128         sk->sk_shutdown = SHUTDOWN_MASK;
2129 
2130         if (sk->sk_state == TCP_LISTEN) {
2131                 tcp_set_state(sk, TCP_CLOSE);
2132 
2133                 /* Special case. */
2134                 inet_csk_listen_stop(sk);
2135 
2136                 goto adjudge_to_death;
2137         }
2138 
2139         /*  We need to flush the receive buffers.  We do this only on the
2140          *  descriptor close, not protocol-sourced closes, because the
2141          *  reader process may not have drained the data yet!
2142          */
2143         while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
2144                 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
2145                           tcp_hdr(skb)->fin;
2146                 data_was_unread += len;
2147                 __kfree_skb(skb);
2148         }
2149 
2150         sk_mem_reclaim(sk);
2151 
2152         /* If socket has been already reset (e.g. in tcp_reset()) - kill it. */
2153         if (sk->sk_state == TCP_CLOSE)
2154                 goto adjudge_to_death;
2155 
2156         /* As outlined in RFC 2525, section 2.17, we send a RST here because
2157          * data was lost. To witness the awful effects of the old behavior of
2158          * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
2159          * GET in an FTP client, suspend the process, wait for the client to
2160          * advertise a zero window, then kill -9 the FTP client, wheee...
2161          * Note: timeout is always zero in such a case.
2162          */
2163         if (unlikely(tcp_sk(sk)->repair)) {
2164                 sk->sk_prot->disconnect(sk, 0);
2165         } else if (data_was_unread) {
2166                 /* Unread data was tossed, zap the connection. */
2167                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONCLOSE);
2168                 tcp_set_state(sk, TCP_CLOSE);
2169                 tcp_send_active_reset(sk, sk->sk_allocation);
2170         } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
2171                 /* Check zero linger _after_ checking for unread data. */
2172                 sk->sk_prot->disconnect(sk, 0);
2173                 NET_INC_STATS_USER(sock_net(sk), LINUX_MIB_TCPABORTONDATA);
2174         } else if (tcp_close_state(sk)) {
2175                 /* We FIN if the application ate all the data before
2176                  * zapping the connection.
2177                  */
2178 
2179                 /* RED-PEN. Formally speaking, we have broken TCP state
2180                  * machine. State transitions:
2181                  *
2182                  * TCP_ESTABLISHED -> TCP_FIN_WAIT1
2183                  * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
2184                  * TCP_CLOSE_WAIT -> TCP_LAST_ACK
2185                  *
2186                  * are legal only when FIN has been sent (i.e. in window),
2187                  * rather than queued out of window. Purists blame.
2188                  *
2189                  * F.e. "RFC state" is ESTABLISHED,
2190                  * if Linux state is FIN-WAIT-1, but FIN is still not sent.
2191                  *
2192                  * The visible deviations are that we sometimes
2193                  * enter the time-wait state when it is not really required
2194                  * (harmless), and do not send active resets when they are
2195                  * required by the specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
2196                  * they look like CLOSING or LAST_ACK to Linux).
2197                  * Probably, I missed some more holelets.
2198                  *                                              --ANK
2199                  * XXX (TFO) - To start off we don't support SYN+ACK+FIN
2200                  * in a single packet! (May consider it later but will
2201                  * probably need API support or TCP_CORK SYN-ACK until
2202                  * data is written and socket is closed.)
2203                  */
2204                 tcp_send_fin(sk);
2205         }
2206 
2207         sk_stream_wait_close(sk, timeout);
2208 
2209 adjudge_to_death:
2210         state = sk->sk_state;
2211         sock_hold(sk);
2212         sock_orphan(sk);
2213 
2214         /* It is the last release_sock in its life. It will remove backlog. */
2215         release_sock(sk);
2216 
2217 
2218         /* Now socket is owned by kernel and we acquire BH lock
2219          * to finish close. No need to check for user refs.
2220          */
2221         local_bh_disable();
2222         bh_lock_sock(sk);
2223         WARN_ON(sock_owned_by_user(sk));
2224 
2225         percpu_counter_inc(sk->sk_prot->orphan_count);
2226 
2227         /* Have we already been destroyed by a softirq or backlog? */
2228         if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
2229                 goto out;
2230 
2231         /*      This is a (useful) BSD violation of the RFC. There is a
2232          *      problem with TCP as specified in that the other end could
2233          *      keep a socket open forever with no application left at this end.
2234          *      We use a 1 minute timeout (about the same as BSD) then kill
2235          *      our end. If they send after that then tough - BUT: the timeout
2236          *      is long enough that we avoid the old "4*rto = almost no time -
2237          *      whoops, reset" mistake.
2238          *
2239          *      Nope, it was not a mistake. It is really the desired behaviour,
2240          *      e.g. on http servers, where such sockets are useless but
2241          *      consume significant resources. Let's do it with a special
2242          *      linger2 option.                                 --ANK
2243          */
2244 
2245         if (sk->sk_state == TCP_FIN_WAIT2) {
2246                 struct tcp_sock *tp = tcp_sk(sk);
2247                 if (tp->linger2 < 0) {
2248                         tcp_set_state(sk, TCP_CLOSE);
2249                         tcp_send_active_reset(sk, GFP_ATOMIC);
2250                         NET_INC_STATS_BH(sock_net(sk),
2251                                         LINUX_MIB_TCPABORTONLINGER);
2252                 } else {
2253                         const int tmo = tcp_fin_time(sk);
2254 
2255                         if (tmo > TCP_TIMEWAIT_LEN) {
2256                                 inet_csk_reset_keepalive_timer(sk,
2257                                                 tmo - TCP_TIMEWAIT_LEN);
2258                         } else {
2259                                 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
2260                                 goto out;
2261                         }
2262                 }
2263         }
2264         if (sk->sk_state != TCP_CLOSE) {
2265                 sk_mem_reclaim(sk);
2266                 if (tcp_check_oom(sk, 0)) {
2267                         tcp_set_state(sk, TCP_CLOSE);
2268                         tcp_send_active_reset(sk, GFP_ATOMIC);
2269                         NET_INC_STATS_BH(sock_net(sk),
2270                                         LINUX_MIB_TCPABORTONMEMORY);
2271                 }
2272         }
2273 
2274         if (sk->sk_state == TCP_CLOSE) {
2275                 struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
2276                 /* We could get here with a non-NULL req if the socket is
2277                  * aborted (e.g., closed with unread data) before 3WHS
2278                  * finishes.
2279                  */
2280                 if (req != NULL)
2281                         reqsk_fastopen_remove(sk, req, false);
2282                 inet_csk_destroy_sock(sk);
2283         }
2284         /* Otherwise, socket is reprieved until protocol close. */
2285 
2286 out:
2287         bh_unlock_sock(sk);
2288         local_bh_enable();
2289         sock_put(sk);
2290 }
2291 EXPORT_SYMBOL(tcp_close);
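
/*
 * Editor's sketch (not part of this file): the userspace setup for the
 * zero-linger branch above, where close() disconnects immediately and
 * sends a RST instead of a FIN (and skips TIME-WAIT).
 */
#if 0
#include <sys/socket.h>
#include <unistd.h>

static void abortive_close(int fd)
{
        struct linger l = { .l_onoff = 1, .l_linger = 0 };

        setsockopt(fd, SOL_SOCKET, SO_LINGER, &l, sizeof(l));
        close(fd);
}
#endif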
2292 
2293 /* These states need RST on ABORT according to RFC793 */
2294 
2295 static inline bool tcp_need_reset(int state)
2296 {
2297         return (1 << state) &
2298                (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
2299                 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
2300 }
2301 
2302 int tcp_disconnect(struct sock *sk, int flags)
2303 {
2304         struct inet_sock *inet = inet_sk(sk);
2305         struct inet_connection_sock *icsk = inet_csk(sk);
2306         struct tcp_sock *tp = tcp_sk(sk);
2307         int err = 0;
2308         int old_state = sk->sk_state;
2309 
2310         if (old_state != TCP_CLOSE)
2311                 tcp_set_state(sk, TCP_CLOSE);
2312 
2313         /* ABORT function of RFC793 */
2314         if (old_state == TCP_LISTEN) {
2315                 inet_csk_listen_stop(sk);
2316         } else if (unlikely(tp->repair)) {
2317                 sk->sk_err = ECONNABORTED;
2318         } else if (tcp_need_reset(old_state) ||
2319                    (tp->snd_nxt != tp->write_seq &&
2320                     (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
2321                 /* The last check adjusts for the discrepancy between Linux
2322                  * and RFC states.
2323                  */
2324                 tcp_send_active_reset(sk, gfp_any());
2325                 sk->sk_err = ECONNRESET;
2326         } else if (old_state == TCP_SYN_SENT)
2327                 sk->sk_err = ECONNRESET;
2328 
2329         tcp_clear_xmit_timers(sk);
2330         __skb_queue_purge(&sk->sk_receive_queue);
2331         tcp_write_queue_purge(sk);
2332         __skb_queue_purge(&tp->out_of_order_queue);
2333 #ifdef CONFIG_NET_DMA
2334         __skb_queue_purge(&sk->sk_async_wait_queue);
2335 #endif
2336 
2337         inet->inet_dport = 0;
2338 
2339         if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
2340                 inet_reset_saddr(sk);
2341 
2342         sk->sk_shutdown = 0;
2343         sock_reset_flag(sk, SOCK_DONE);
2344         tp->srtt_us = 0;
2345         if ((tp->write_seq += tp->max_window + 2) == 0)
2346                 tp->write_seq = 1;
2347         icsk->icsk_backoff = 0;
2348         tp->snd_cwnd = 2;
2349         icsk->icsk_probes_out = 0;
2350         tp->packets_out = 0;
2351         tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
2352         tp->snd_cwnd_cnt = 0;
2353         tp->window_clamp = 0;
2354         tcp_set_ca_state(sk, TCP_CA_Open);
2355         tcp_clear_retrans(tp);
2356         inet_csk_delack_init(sk);
2357         tcp_init_send_head(sk);
2358         memset(&tp->rx_opt, 0, sizeof(tp->rx_opt));
2359         __sk_dst_reset(sk);
2360 
2361         WARN_ON(inet->inet_num && !icsk->icsk_bind_hash);
2362 
2363         sk->sk_error_report(sk);
2364         return err;
2365 }
2366 EXPORT_SYMBOL(tcp_disconnect);
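
/*
 * Editor's sketch (not part of this file): tcp_disconnect() is reached
 * from userspace by connect()ing an established TCP socket to an
 * AF_UNSPEC address, which dissolves the association.
 */
#if 0
#include <string.h>
#include <sys/socket.h>

static int dissolve(int fd)
{
        struct sockaddr sa;

        memset(&sa, 0, sizeof(sa));
        sa.sa_family = AF_UNSPEC;
        return connect(fd, &sa, sizeof(sa));
}
#endif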
2367 
2368 void tcp_sock_destruct(struct sock *sk)
2369 {
2370         inet_sock_destruct(sk);
2371 
2372         kfree(inet_csk(sk)->icsk_accept_queue.fastopenq);
2373 }
2374 
2375 static inline bool tcp_can_repair_sock(const struct sock *sk)
2376 {
2377         return ns_capable(sock_net(sk)->user_ns, CAP_NET_ADMIN) &&
2378                 ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_ESTABLISHED));
2379 }
2380 
2381 static int tcp_repair_options_est(struct tcp_sock *tp,
2382                 struct tcp_repair_opt __user *optbuf, unsigned int len)
2383 {
2384         struct tcp_repair_opt opt;
2385 
2386         while (len >= sizeof(opt)) {
2387                 if (copy_from_user(&opt, optbuf, sizeof(opt)))
2388                         return -EFAULT;
2389 
2390                 optbuf++;
2391                 len -= sizeof(opt);
2392 
2393                 switch (opt.opt_code) {
2394                 case TCPOPT_MSS:
2395                         tp->rx_opt.mss_clamp = opt.opt_val;
2396                         break;
2397                 case TCPOPT_WINDOW:
2398                         {
2399                                 u16 snd_wscale = opt.opt_val & 0xFFFF;
2400                                 u16 rcv_wscale = opt.opt_val >> 16;
2401 
2402                                 if (snd_wscale > 14 || rcv_wscale > 14)
2403                                         return -EFBIG;
2404 
2405                                 tp->rx_opt.snd_wscale = snd_wscale;
2406                                 tp->rx_opt.rcv_wscale = rcv_wscale;
2407                                 tp->rx_opt.wscale_ok = 1;
2408                         }
2409                         break;
2410                 case TCPOPT_SACK_PERM:
2411                         if (opt.opt_val != 0)
2412                                 return -EINVAL;
2413 
2414                         tp->rx_opt.sack_ok |= TCP_SACK_SEEN;
2415                         if (sysctl_tcp_fack)
2416                                 tcp_enable_fack(tp);
2417                         break;
2418                 case TCPOPT_TIMESTAMP:
2419                         if (opt.opt_val != 0)
2420                                 return -EINVAL;
2421 
2422                         tp->rx_opt.tstamp_ok = 1;
2423                         break;
2424                 }
2425         }
2426 
2427         return 0;
2428 }
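
/*
 * Editor's sketch (not part of this file): the userspace side of the
 * parser above, as used by checkpoint/restore tools.  The socket must
 * already be in repair mode and TCP_ESTABLISHED.  The TCPOPT_* values
 * are the standard TCP option kinds, which userspace defines itself;
 * the option values shown are illustrative.
 */
#if 0
#include <linux/tcp.h>          /* struct tcp_repair_opt, TCP_REPAIR_OPTIONS */
#include <netinet/in.h>
#include <sys/socket.h>

#define TCPOPT_MSS              2
#define TCPOPT_WINDOW           3
#define TCPOPT_SACK_PERM        4
#define TCPOPT_TIMESTAMP        8

static int restore_tcp_options(int fd)
{
        struct tcp_repair_opt opts[] = {
                { TCPOPT_MSS,           1460 },
                { TCPOPT_WINDOW,        7 | (7 << 16) },  /* snd | rcv << 16 */
                { TCPOPT_SACK_PERM,     0 },
                { TCPOPT_TIMESTAMP,     0 },
        };

        return setsockopt(fd, IPPROTO_TCP, TCP_REPAIR_OPTIONS,
                          opts, sizeof(opts));
}
#endif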
2429 
2430 /*
2431  *      Socket option code for TCP.
2432  */
2433 static int do_tcp_setsockopt(struct sock *sk, int level,
2434                 int optname, char __user *optval, unsigned int optlen)
2435 {
2436         struct tcp_sock *tp = tcp_sk(sk);
2437         struct inet_connection_sock *icsk = inet_csk(sk);
2438         int val;
2439         int err = 0;
2440 
2441         /* These are data/string values; all the others are ints */
2442         switch (optname) {
2443         case TCP_CONGESTION: {
2444                 char name[TCP_CA_NAME_MAX];
2445 
2446                 if (optlen < 1)
2447                         return -EINVAL;
2448 
2449                 val = strncpy_from_user(name, optval,
2450                                         min_t(long, TCP_CA_NAME_MAX-1, optlen));
2451                 if (val < 0)
2452                         return -EFAULT;
2453                 name[val] = 0;
2454 
2455                 lock_sock(sk);
2456                 err = tcp_set_congestion_control(sk, name);
2457                 release_sock(sk);
2458                 return err;
2459         }
2460         default:
2461                 /* not a string option: handled as an int below */
2462                 break;
2463         }
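
        /*
         * Editor's sketch (not part of this file): the userspace call the
         * TCP_CONGESTION case above handles, selecting a congestion control
         * algorithm by name.
         */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <string.h>
#include <sys/socket.h>

static int set_cc(int fd, const char *name)     /* e.g. "reno", "cubic" */
{
        return setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION,
                          name, strlen(name));
}
#endif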
2464 
2465         if (optlen < sizeof(int))
2466                 return -EINVAL;
2467 
2468         if (get_user(val, (int __user *)optval))
2469                 return -EFAULT;
2470 
2471         lock_sock(sk);
2472 
2473         switch (optname) {
2474         case TCP_MAXSEG:
2475                 /* Values greater than the interface MTU won't take effect.
2476                  * However, at the point when this call is done we typically
2477                  * don't yet know which interface is going to be used. */
2478                 if (val < TCP_MIN_MSS || val > MAX_TCP_WINDOW) {
2479                         err = -EINVAL;
2480                         break;
2481                 }
2482                 tp->rx_opt.user_mss = val;
2483                 break;
2484 
2485         case TCP_NODELAY:
2486                 if (val) {
2487                         /* TCP_NODELAY is weaker than TCP_CORK, so that
2488                          * this option on corked socket is remembered, but
2489                          * it is not activated until cork is cleared.
2490                          *
2491                          * However, when TCP_NODELAY is set we make
2492                          * an explicit push, which overrides even TCP_CORK
2493                          * for currently queued segments.
2494                          */
2495                         tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
2496                         tcp_push_pending_frames(sk);
2497                 } else {
2498                         tp->nonagle &= ~TCP_NAGLE_OFF;
2499                 }
2500                 break;
2501 
2502         case TCP_THIN_LINEAR_TIMEOUTS:
2503                 if (val < 0 || val > 1)
2504                         err = -EINVAL;
2505                 else
2506                         tp->thin_lto = val;
2507                 break;
2508 
2509         case TCP_THIN_DUPACK:
2510                 if (val < 0 || val > 1)
2511                         err = -EINVAL;
2512                 else {
2513                         tp->thin_dupack = val;
2514                         if (tp->thin_dupack)
2515                                 tcp_disable_early_retrans(tp);
2516                 }
2517                 break;
2518 
2519         case TCP_REPAIR:
2520                 if (!tcp_can_repair_sock(sk))
2521                         err = -EPERM;
2522                 else if (val == 1) {
2523                         tp->repair = 1;
2524                         sk->sk_reuse = SK_FORCE_REUSE;
2525                         tp->repair_queue = TCP_NO_QUEUE;
2526                 } else if (val == 0) {
2527                         tp->repair = 0;
2528                         sk->sk_reuse = SK_NO_REUSE;
2529                         tcp_send_window_probe(sk);
2530                 } else
2531                         err = -EINVAL;
2532 
2533                 break;
2534 
2535         case TCP_REPAIR_QUEUE:
2536                 if (!tp->repair)
2537                         err = -EPERM;
2538                 else if (val < TCP_QUEUES_NR)
2539                         tp->repair_queue = val;
2540                 else
2541                         err = -EINVAL;
2542                 break;
2543 
2544         case TCP_QUEUE_SEQ:
2545                 if (sk->sk_state != TCP_CLOSE)
2546                         err = -EPERM;
2547                 else if (tp->repair_queue == TCP_SEND_QUEUE)
2548                         tp->write_seq = val;
2549                 else if (tp->repair_queue == TCP_RECV_QUEUE)
2550                         tp->rcv_nxt = val;
2551                 else
2552                         err = -EINVAL;
2553                 break;
2554 
2555         case TCP_REPAIR_OPTIONS:
2556                 if (!tp->repair)
2557                         err = -EINVAL;
2558                 else if (sk->sk_state == TCP_ESTABLISHED)
2559                         err = tcp_repair_options_est(tp,
2560                                         (struct tcp_repair_opt __user *)optval,
2561                                         optlen);
2562                 else
2563                         err = -EPERM;
2564                 break;
2565 
2566         case TCP_CORK:
2567                 /* When set, this indicates that we always queue non-full frames.
2568                  * Later the user clears this option and we transmit
2569                  * any pending partial frames in the queue.  This is
2570                  * meant to be used alongside sendfile() to get properly
2571                  * filled frames when the user (for example) must write
2572                  * out headers with a write() call first and then use
2573                  * sendfile to send out the data parts (see the sketch below).
2574                  *
2575                  * TCP_CORK can be set together with TCP_NODELAY and it is
2576                  * stronger than TCP_NODELAY.
2577                  */
2578                 if (val) {
2579                         tp->nonagle |= TCP_NAGLE_CORK;
2580                 } else {
2581                         tp->nonagle &= ~TCP_NAGLE_CORK;
2582                         if (tp->nonagle&TCP_NAGLE_OFF)
2583                                 tp->nonagle |= TCP_NAGLE_PUSH;
2584                         tcp_push_pending_frames(sk);
2585                 }
2586                 break;
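
        /*
         * Editor's sketch (not part of this file): the write()-then-
         * sendfile() pattern described in the TCP_CORK comment above.
         * hdr/hlen/file_fd/fsize are assumed; error handling is omitted.
         */
#if 0
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <unistd.h>

static void send_response(int fd, const void *hdr, size_t hlen,
                          int file_fd, size_t fsize)
{
        int on = 1, off = 0;

        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
        write(fd, hdr, hlen);                   /* queued, not yet sent */
        sendfile(fd, file_fd, NULL, fsize);
        setsockopt(fd, IPPROTO_TCP, TCP_CORK, &off, sizeof(off)); /* flush */
}
#endif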
2587 
2588         case TCP_KEEPIDLE:
2589                 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2590                         err = -EINVAL;
2591                 else {
2592                         tp->keepalive_time = val * HZ;
2593                         if (sock_flag(sk, SOCK_KEEPOPEN) &&
2594                             !((1 << sk->sk_state) &
2595                               (TCPF_CLOSE | TCPF_LISTEN))) {
2596                                 u32 elapsed = keepalive_time_elapsed(tp);
2597                                 if (tp->keepalive_time > elapsed)
2598                                         elapsed = tp->keepalive_time - elapsed;
2599                                 else
2600                                         elapsed = 0;
2601                                 inet_csk_reset_keepalive_timer(sk, elapsed);
2602                         }
2603                 }
2604                 break;
2605         case TCP_KEEPINTVL:
2606                 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2607                         err = -EINVAL;
2608                 else
2609                         tp->keepalive_intvl = val * HZ;
2610                 break;
2611         case TCP_KEEPCNT:
2612                 if (val < 1 || val > MAX_TCP_KEEPCNT)
2613                         err = -EINVAL;
2614                 else
2615                         tp->keepalive_probes = val;
2616                 break;
2617         case TCP_SYNCNT:
2618                 if (val < 1 || val > MAX_TCP_SYNCNT)
2619                         err = -EINVAL;
2620                 else
2621                         icsk->icsk_syn_retries = val;
2622                 break;
2623 
2624         case TCP_LINGER2:
2625                 if (val < 0)
2626                         tp->linger2 = -1;
2627                 else if (val > sysctl_tcp_fin_timeout / HZ)
2628                         tp->linger2 = 0;
2629                 else
2630                         tp->linger2 = val * HZ;
2631                 break;
2632 
2633         case TCP_DEFER_ACCEPT:
2634                 /* Translate value in seconds to number of retransmits */
2635                 icsk->icsk_accept_queue.rskq_defer_accept =
2636                         secs_to_retrans(val, TCP_TIMEOUT_INIT / HZ,
2637                                         TCP_RTO_MAX / HZ);
2638                 break;
2639 
2640         case TCP_WINDOW_CLAMP:
2641                 if (!val) {
2642                         if (sk->sk_state != TCP_CLOSE) {
2643                                 err = -EINVAL;
2644                                 break;
2645                         }
2646                         tp->window_clamp = 0;
2647                 } else
2648                         tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2649                                                 SOCK_MIN_RCVBUF / 2 : val;
2650                 break;
2651 
2652         case TCP_QUICKACK:
2653                 if (!val) {
2654                         icsk->icsk_ack.pingpong = 1;
2655                 } else {
2656                         icsk->icsk_ack.pingpong = 0;
2657                         if ((1 << sk->sk_state) &
2658                             (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2659                             inet_csk_ack_scheduled(sk)) {
2660                                 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
2661                                 tcp_cleanup_rbuf(sk, 1);
2662                                 if (!(val & 1))
2663                                         icsk->icsk_ack.pingpong = 1;
2664                         }
2665                 }
2666                 break;
2667 
2668 #ifdef CONFIG_TCP_MD5SIG
2669         case TCP_MD5SIG:
2670                 /* Read the IP->Key mappings from userspace */
2671                 err = tp->af_specific->md5_parse(sk, optval, optlen);
2672                 break;
2673 #endif
2674         case TCP_USER_TIMEOUT:
2675                 /* Cap the maximum time in ms that TCP will retransmit
2676                  * before giving up and aborting the connection (ETIMEDOUT).
2677                  */
2678                 if (val < 0)
2679                         err = -EINVAL;
2680                 else
2681                         icsk->icsk_user_timeout = msecs_to_jiffies(val);
2682                 break;
2683 
2684         case TCP_FASTOPEN:
2685                 if (val >= 0 && ((1 << sk->sk_state) & (TCPF_CLOSE |
2686                     TCPF_LISTEN)))
2687                         err = fastopen_init_queue(sk, val);
2688                 else
2689                         err = -EINVAL;
2690                 break;
2691         case TCP_TIMESTAMP:
2692                 if (!tp->repair)
2693                         err = -EPERM;
2694                 else
2695                         tp->tsoffset = val - tcp_time_stamp;
2696                 break;
2697         case TCP_NOTSENT_LOWAT:
2698                 tp->notsent_lowat = val;
2699                 sk->sk_write_space(sk);
2700                 break;
2701         default:
2702                 err = -ENOPROTOOPT;
2703                 break;
2704         }
2705 
2706         release_sock(sk);
2707         return err;
2708 }
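
/* Illustrative userspace sketch (not part of the kernel source): the
 * TCP_REPAIR_QUEUE/TCP_QUEUE_SEQ cases above are driven by
 * checkpoint/restore tools such as CRIU.  Restoring the send-queue
 * sequence number on a closed socket looks roughly like this; "sock"
 * and "seq" are assumed to come from the checkpoint image, and
 * CAP_NET_ADMIN is required to enter repair mode:
 *
 *     int one = 1, q = TCP_SEND_QUEUE;
 *     unsigned int seq = ...;    // saved tp->write_seq
 *     setsockopt(sock, IPPROTO_TCP, TCP_REPAIR, &one, sizeof(one));
 *     setsockopt(sock, IPPROTO_TCP, TCP_REPAIR_QUEUE, &q, sizeof(q));
 *     setsockopt(sock, IPPROTO_TCP, TCP_QUEUE_SEQ, &seq, sizeof(seq));
 */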
2709 
2710 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
2711                    unsigned int optlen)
2712 {
2713         const struct inet_connection_sock *icsk = inet_csk(sk);
2714 
2715         if (level != SOL_TCP)
2716                 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
2717                                                      optval, optlen);
2718         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2719 }
2720 EXPORT_SYMBOL(tcp_setsockopt);
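
/* Illustrative userspace sketch (not part of the kernel source): the
 * TCP_CORK pattern described in the comment above, corking the socket
 * around a write()+sendfile() pair so headers and data leave in
 * full-sized frames.  "sock", "fd", "hdr", "hdr_len" and "len" are
 * assumed to be set up by the caller:
 *
 *     int on = 1, off = 0;
 *     setsockopt(sock, IPPROTO_TCP, TCP_CORK, &on, sizeof(on));
 *     write(sock, hdr, hdr_len);              // queued, not yet sent
 *     sendfile(sock, fd, NULL, len);          // appended to the queue
 *     setsockopt(sock, IPPROTO_TCP, TCP_CORK, &off, sizeof(off));
 *                                             // uncork: push frames out
 */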
2721 
2722 #ifdef CONFIG_COMPAT
2723 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
2724                           char __user *optval, unsigned int optlen)
2725 {
2726         if (level != SOL_TCP)
2727                 return inet_csk_compat_setsockopt(sk, level, optname,
2728                                                   optval, optlen);
2729         return do_tcp_setsockopt(sk, level, optname, optval, optlen);
2730 }
2731 EXPORT_SYMBOL(compat_tcp_setsockopt);
2732 #endif
2733 
2734 /* Return information about state of tcp endpoint in API format. */
2735 void tcp_get_info(const struct sock *sk, struct tcp_info *info)
2736 {
2737         const struct tcp_sock *tp = tcp_sk(sk);
2738         const struct inet_connection_sock *icsk = inet_csk(sk);
2739         u32 now = tcp_time_stamp;
2740 
2741         memset(info, 0, sizeof(*info));
2742 
2743         info->tcpi_state = sk->sk_state;
2744         info->tcpi_ca_state = icsk->icsk_ca_state;
2745         info->tcpi_retransmits = icsk->icsk_retransmits;
2746         info->tcpi_probes = icsk->icsk_probes_out;
2747         info->tcpi_backoff = icsk->icsk_backoff;
2748 
2749         if (tp->rx_opt.tstamp_ok)
2750                 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2751         if (tcp_is_sack(tp))
2752                 info->tcpi_options |= TCPI_OPT_SACK;
2753         if (tp->rx_opt.wscale_ok) {
2754                 info->tcpi_options |= TCPI_OPT_WSCALE;
2755                 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2756                 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2757         }
2758 
2759         if (tp->ecn_flags & TCP_ECN_OK)
2760                 info->tcpi_options |= TCPI_OPT_ECN;
2761         if (tp->ecn_flags & TCP_ECN_SEEN)
2762                 info->tcpi_options |= TCPI_OPT_ECN_SEEN;
2763         if (tp->syn_data_acked)
2764                 info->tcpi_options |= TCPI_OPT_SYN_DATA;
2765 
2766         info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2767         info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2768         info->tcpi_snd_mss = tp->mss_cache;
2769         info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2770 
2771         if (sk->sk_state == TCP_LISTEN) {
2772                 info->tcpi_unacked = sk->sk_ack_backlog;
2773                 info->tcpi_sacked = sk->sk_max_ack_backlog;
2774         } else {
2775                 info->tcpi_unacked = tp->packets_out;
2776                 info->tcpi_sacked = tp->sacked_out;
2777         }
2778         info->tcpi_lost = tp->lost_out;
2779         info->tcpi_retrans = tp->retrans_out;
2780         info->tcpi_fackets = tp->fackets_out;
2781 
2782         info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2783         info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2784         info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2785 
2786         info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2787         info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2788         info->tcpi_rtt = tp->srtt_us >> 3;
2789         info->tcpi_rttvar = tp->mdev_us >> 2;
2790         info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2791         info->tcpi_snd_cwnd = tp->snd_cwnd;
2792         info->tcpi_advmss = tp->advmss;
2793         info->tcpi_reordering = tp->reordering;
2794 
2795         info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2796         info->tcpi_rcv_space = tp->rcvq_space.space;
2797 
2798         info->tcpi_total_retrans = tp->total_retrans;
2799 
2800         info->tcpi_pacing_rate = sk->sk_pacing_rate != ~0U ?
2801                                         sk->sk_pacing_rate : ~0ULL;
2802         info->tcpi_max_pacing_rate = sk->sk_max_pacing_rate != ~0U ?
2803                                         sk->sk_max_pacing_rate : ~0ULL;
2804 }
2805 EXPORT_SYMBOL_GPL(tcp_get_info);
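
/* Illustrative userspace sketch (not part of the kernel source):
 * tcp_get_info() is what fills the buffer for a getsockopt(TCP_INFO)
 * call on a connected socket "sock":
 *
 *     struct tcp_info info;
 *     socklen_t len = sizeof(info);
 *     if (getsockopt(sock, IPPROTO_TCP, TCP_INFO, &info, &len) == 0)
 *             printf("rtt %u us cwnd %u\n",
 *                    info.tcpi_rtt, info.tcpi_snd_cwnd);
 */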
2806 
2807 static int do_tcp_getsockopt(struct sock *sk, int level,
2808                 int optname, char __user *optval, int __user *optlen)
2809 {
2810         struct inet_connection_sock *icsk = inet_csk(sk);
2811         struct tcp_sock *tp = tcp_sk(sk);
2812         int val, len;
2813 
2814         if (get_user(len, optlen))
2815                 return -EFAULT;
2816 
2817         len = min_t(unsigned int, len, sizeof(int));
2818 
2819         if (len < 0)
2820                 return -EINVAL;
2821 
2822         switch (optname) {
2823         case TCP_MAXSEG:
2824                 val = tp->mss_cache;
2825                 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2826                         val = tp->rx_opt.user_mss;
2827                 if (tp->repair)
2828                         val = tp->rx_opt.mss_clamp;
2829                 break;
2830         case TCP_NODELAY:
2831                 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2832                 break;
2833         case TCP_CORK:
2834                 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2835                 break;
2836         case TCP_KEEPIDLE:
2837                 val = keepalive_time_when(tp) / HZ;
2838                 break;
2839         case TCP_KEEPINTVL:
2840                 val = keepalive_intvl_when(tp) / HZ;
2841                 break;
2842         case TCP_KEEPCNT:
2843                 val = keepalive_probes(tp);
2844                 break;
2845         case TCP_SYNCNT:
2846                 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2847                 break;
2848         case TCP_LINGER2:
2849                 val = tp->linger2;
2850                 if (val >= 0)
2851                         val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2852                 break;
2853         case TCP_DEFER_ACCEPT:
2854                 val = retrans_to_secs(icsk->icsk_accept_queue.rskq_defer_accept,
2855                                       TCP_TIMEOUT_INIT / HZ, TCP_RTO_MAX / HZ);
2856                 break;
2857         case TCP_WINDOW_CLAMP:
2858                 val = tp->window_clamp;
2859                 break;
2860         case TCP_INFO: {
2861                 struct tcp_info info;
2862 
2863                 if (get_user(len, optlen))
2864                         return -EFAULT;
2865 
2866                 tcp_get_info(sk, &info);
2867 
2868                 len = min_t(unsigned int, len, sizeof(info));
2869                 if (put_user(len, optlen))
2870                         return -EFAULT;
2871                 if (copy_to_user(optval, &info, len))
2872                         return -EFAULT;
2873                 return 0;
2874         }
2875         case TCP_QUICKACK:
2876                 val = !icsk->icsk_ack.pingpong;
2877                 break;
2878 
2879         case TCP_CONGESTION:
2880                 if (get_user(len, optlen))
2881                         return -EFAULT;
2882                 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2883                 if (put_user(len, optlen))
2884                         return -EFAULT;
2885                 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2886                         return -EFAULT;
2887                 return 0;
2888 
2889         case TCP_THIN_LINEAR_TIMEOUTS:
2890                 val = tp->thin_lto;
2891                 break;
2892         case TCP_THIN_DUPACK:
2893                 val = tp->thin_dupack;
2894                 break;
2895 
2896         case TCP_REPAIR:
2897                 val = tp->repair;
2898                 break;
2899 
2900         case TCP_REPAIR_QUEUE:
2901                 if (tp->repair)
2902                         val = tp->repair_queue;
2903                 else
2904                         return -EINVAL;
2905                 break;
2906 
2907         case TCP_QUEUE_SEQ:
2908                 if (tp->repair_queue == TCP_SEND_QUEUE)
2909                         val = tp->write_seq;
2910                 else if (tp->repair_queue == TCP_RECV_QUEUE)
2911                         val = tp->rcv_nxt;
2912                 else
2913                         return -EINVAL;
2914                 break;
2915 
2916         case TCP_USER_TIMEOUT:
2917                 val = jiffies_to_msecs(icsk->icsk_user_timeout);
2918                 break;
2919         case TCP_TIMESTAMP:
2920                 val = tcp_time_stamp + tp->tsoffset;
2921                 break;
2922         case TCP_NOTSENT_LOWAT:
2923                 val = tp->notsent_lowat;
2924                 break;
2925         default:
2926                 return -ENOPROTOOPT;
2927         }
2928 
2929         if (put_user(len, optlen))
2930                 return -EFAULT;
2931         if (copy_to_user(optval, &val, len))
2932                 return -EFAULT;
2933         return 0;
2934 }
2935 
2936 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2937                    int __user *optlen)
2938 {
2939         struct inet_connection_sock *icsk = inet_csk(sk);
2940 
2941         if (level != SOL_TCP)
2942                 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2943                                                      optval, optlen);
2944         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2945 }
2946 EXPORT_SYMBOL(tcp_getsockopt);
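
/* Illustrative userspace sketch (not part of the kernel source):
 * reading the current congestion control name via the TCP_CONGESTION
 * case above:
 *
 *     char ca[16];    // TCP_CA_NAME_MAX
 *     socklen_t len = sizeof(ca);
 *     if (getsockopt(sock, IPPROTO_TCP, TCP_CONGESTION, ca, &len) == 0)
 *             printf("cc: %.*s\n", (int)len, ca);
 */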
2947 
2948 #ifdef CONFIG_COMPAT
2949 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2950                           char __user *optval, int __user *optlen)
2951 {
2952         if (level != SOL_TCP)
2953                 return inet_csk_compat_getsockopt(sk, level, optname,
2954                                                   optval, optlen);
2955         return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2956 }
2957 EXPORT_SYMBOL(compat_tcp_getsockopt);
2958 #endif
2959 
2960 #ifdef CONFIG_TCP_MD5SIG
2961 static struct tcp_md5sig_pool __percpu *tcp_md5sig_pool __read_mostly;
2962 static DEFINE_MUTEX(tcp_md5sig_mutex);
2963 
2964 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool __percpu *pool)
2965 {
2966         int cpu;
2967 
2968         for_each_possible_cpu(cpu) {
2969                 struct tcp_md5sig_pool *p = per_cpu_ptr(pool, cpu);
2970 
2971                 if (p->md5_desc.tfm)
2972                         crypto_free_hash(p->md5_desc.tfm);
2973         }
2974         free_percpu(pool);
2975 }
2976 
2977 static void __tcp_alloc_md5sig_pool(void)
2978 {
2979         int cpu;
2980         struct tcp_md5sig_pool __percpu *pool;
2981 
2982         pool = alloc_percpu(struct tcp_md5sig_pool);
2983         if (!pool)
2984                 return;
2985 
2986         for_each_possible_cpu(cpu) {
2987                 struct crypto_hash *hash;
2988 
2989                 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2990                 if (IS_ERR_OR_NULL(hash))
2991                         goto out_free;
2992 
2993                 per_cpu_ptr(pool, cpu)->md5_desc.tfm = hash;
2994         }
2995         /* Before setting tcp_md5sig_pool, we must commit all writes
2996          * to memory.  See ACCESS_ONCE() in tcp_get_md5sig_pool().
2997          */
2998         smp_wmb();
2999         tcp_md5sig_pool = pool;
3000         return;
3001 out_free:
3002         __tcp_free_md5sig_pool(pool);
3003 }
3004 
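/* Double-checked allocation: the unlocked test below is an optimistic
 * fast path; it is repeated under tcp_md5sig_mutex so that concurrent
 * callers allocate the pool at most once.  The smp_wmb() above ensures
 * the per-cpu contents are visible before the pool pointer is published.
 */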
3005 bool tcp_alloc_md5sig_pool(void)
3006 {
3007         if (unlikely(!tcp_md5sig_pool)) {
3008                 mutex_lock(&tcp_md5sig_mutex);
3009 
3010                 if (!tcp_md5sig_pool)
3011                         __tcp_alloc_md5sig_pool();
3012 
3013                 mutex_unlock(&tcp_md5sig_mutex);
3014         }
3015         return tcp_md5sig_pool != NULL;
3016 }
3017 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
3018 
3019 
3020 /**
3021  *      tcp_get_md5sig_pool - get md5sig_pool for this user
3022  *
3023  *      We use a percpu structure, so on success we return with preemption
3024  *      and BH disabled, to make sure another thread or a softirq handler
3025  *      won't try to use the same context.
3026  */
3027 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void)
3028 {
3029         struct tcp_md5sig_pool __percpu *p;
3030 
3031         local_bh_disable();
3032         p = ACCESS_ONCE(tcp_md5sig_pool);
3033         if (p)
3034                 return __this_cpu_ptr(p);
3035 
3036         local_bh_enable();
3037         return NULL;
3038 }
3039 EXPORT_SYMBOL(tcp_get_md5sig_pool);
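
/* Callers pair this with tcp_put_md5sig_pool() (a local_bh_enable()
 * wrapper in tcp.h) once they are done hashing, keeping the BH-disabled
 * section short:
 *
 *     struct tcp_md5sig_pool *hp = tcp_get_md5sig_pool();
 *     if (hp) {
 *             // crypto_hash_init() + tcp_md5_hash_*() calls here
 *             tcp_put_md5sig_pool();
 *     }
 */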
3040 
3041 int tcp_md5_hash_header(struct tcp_md5sig_pool *hp,
3042                         const struct tcphdr *th)
3043 {
3044         struct scatterlist sg;
3045         struct tcphdr hdr;
3046         int err;
3047 
3048         /* We are not allowed to change the tcphdr; make a local copy. */
3049         memcpy(&hdr, th, sizeof(hdr));
3050         hdr.check = 0;
3051 
3052         /* options aren't included in the hash */
3053         sg_init_one(&sg, &hdr, sizeof(hdr));
3054         err = crypto_hash_update(&hp->md5_desc, &sg, sizeof(hdr));
3055         return err;
3056 }
3057 EXPORT_SYMBOL(tcp_md5_hash_header);
3058 
3059 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp,
3060                           const struct sk_buff *skb, unsigned int header_len)
3061 {
3062         struct scatterlist sg;
3063         const struct tcphdr *tp = tcp_hdr(skb);
3064         struct hash_desc *desc = &hp->md5_desc;
3065         unsigned int i;
3066         const unsigned int head_data_len = skb_headlen(skb) > header_len ?
3067                                            skb_headlen(skb) - header_len : 0;
3068         const struct skb_shared_info *shi = skb_shinfo(skb);
3069         struct sk_buff *frag_iter;
3070 
3071         sg_init_table(&sg, 1);
3072 
3073         sg_set_buf(&sg, ((u8 *) tp) + header_len, head_data_len);
3074         if (crypto_hash_update(desc, &sg, head_data_len))
3075                 return 1;
3076 
3077         for (i = 0; i < shi->nr_frags; ++i) {
3078                 const struct skb_frag_struct *f = &shi->frags[i];
3079                 unsigned int offset = f->page_offset;
3080                 struct page *page = skb_frag_page(f) + (offset >> PAGE_SHIFT);
3081 
3082                 sg_set_page(&sg, page, skb_frag_size(f),
3083                             offset_in_page(offset));
3084                 if (crypto_hash_update(desc, &sg, skb_frag_size(f)))
3085                         return 1;
3086         }
3087 
3088         skb_walk_frags(skb, frag_iter)
3089                 if (tcp_md5_hash_skb_data(hp, frag_iter, 0))
3090                         return 1;
3091 
3092         return 0;
3093 }
3094 EXPORT_SYMBOL(tcp_md5_hash_skb_data);
3095 
3096 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp, const struct tcp_md5sig_key *key)
3097 {
3098         struct scatterlist sg;
3099 
3100         sg_init_one(&sg, key->key, key->keylen);
3101         return crypto_hash_update(&hp->md5_desc, &sg, key->keylen);
3102 }
3103 EXPORT_SYMBOL(tcp_md5_hash_key);
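
/* The three helpers above are combined by the address-family code
 * (e.g. tcp_v4_md5_hash_skb()) to compute the RFC 2385 digest:
 * pseudo-header, then the TCP header with checksum zeroed, then the
 * payload, then the key, all inside one get/put pool section.
 */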
3104 
3105 #endif
3106 
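/* Bring a socket to TCP_CLOSE for good: stop retransmit timers, drop
 * any pending Fast Open request, and either wake the owner (socket
 * still attached) or destroy the socket outright.
 */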
3107 void tcp_done(struct sock *sk)
3108 {
3109         struct request_sock *req = tcp_sk(sk)->fastopen_rsk;
3110 
3111         if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
3112                 TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_ATTEMPTFAILS);
3113 
3114         tcp_set_state(sk, TCP_CLOSE);
3115         tcp_clear_xmit_timers(sk);
3116         if (req != NULL)
3117                 reqsk_fastopen_remove(sk, req, false);
3118 
3119         sk->sk_shutdown = SHUTDOWN_MASK;
3120 
3121         if (!sock_flag(sk, SOCK_DEAD))
3122                 sk->sk_state_change(sk);
3123         else
3124                 inet_csk_destroy_sock(sk);
3125 }
3126 EXPORT_SYMBOL_GPL(tcp_done);
3127 
3128 extern struct tcp_congestion_ops tcp_reno;
3129 
3130 static __initdata unsigned long thash_entries;
3131 static int __init set_thash_entries(char *str)
3132 {
3133         ssize_t ret;
3134 
3135         if (!str)
3136                 return 0;
3137 
3138         ret = kstrtoul(str, 0, &thash_entries);
3139         if (ret)
3140                 return 0;
3141 
3142         return 1;
3143 }
3144 __setup("thash_entries=", set_thash_entries);
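
/* Example: booting with "thash_entries=131072" on the kernel command
 * line fixes the size of the established hash table instead of letting
 * alloc_large_system_hash() scale it to available memory.
 */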
3145 
3146 static void tcp_init_mem(void)
3147 {
3148         unsigned long limit = nr_free_buffer_pages() / 8;
3149         limit = max(limit, 128UL);
3150         sysctl_tcp_mem[0] = limit / 4 * 3;
3151         sysctl_tcp_mem[1] = limit;
3152         sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
3153 }
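
/* Worked example (illustrative): with 1 GiB of free buffer pages
 * (262144 pages of 4 KiB), limit = 32768 pages, so sysctl_tcp_mem
 * becomes { 24576, 32768, 49152 } pages and TCP enters memory
 * pressure at 128 MiB.
 */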
3154 
3155 void __init tcp_init(void)
3156 {
3157         struct sk_buff *skb = NULL;
3158         unsigned long limit;
3159         int max_rshare, max_wshare, cnt;
3160         unsigned int i;
3161 
3162         BUILD_BUG_ON(sizeof(struct tcp_skb_cb) > sizeof(skb->cb));
3163 
3164         percpu_counter_init(&tcp_sockets_allocated, 0);
3165         percpu_counter_init(&tcp_orphan_count, 0);
3166         tcp_hashinfo.bind_bucket_cachep =
3167                 kmem_cache_create("tcp_bind_bucket",
3168                                   sizeof(struct inet_bind_bucket), 0,
3169                                   SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
3170 
3171         /* Size and allocate the main established and bind bucket
3172          * hash tables.
3173          *
3174          * The methodology is similar to that of the buffer cache.
3175          */
3176         tcp_hashinfo.ehash =
3177                 alloc_large_system_hash("TCP established",
3178                                         sizeof(struct inet_ehash_bucket),
3179                                         thash_entries,
3180                                         17, /* one slot per 128 KB of memory */
3181                                         0,
3182                                         NULL,
3183                                         &tcp_hashinfo.ehash_mask,
3184                                         0,
3185                                         thash_entries ? 0 : 512 * 1024);
3186         for (i = 0; i <= tcp_hashinfo.ehash_mask; i++)
3187                 INIT_HLIST_NULLS_HEAD(&tcp_hashinfo.ehash[i].chain, i);
3188 
3189         if (inet_ehash_locks_alloc(&tcp_hashinfo))
3190                 panic("TCP: failed to alloc ehash_locks");
3191         tcp_hashinfo.bhash =
3192                 alloc_large_system_hash("TCP bind",
3193                                         sizeof(struct inet_bind_hashbucket),
3194                                         tcp_hashinfo.ehash_mask + 1,
3195                                         17, /* one slot per 128 KB of memory */
3196                                         0,
3197                                         &tcp_hashinfo.bhash_size,
3198                                         NULL,
3199                                         0,
3200                                         64 * 1024);
3201         tcp_hashinfo.bhash_size = 1U << tcp_hashinfo.bhash_size;
3202         for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
3203                 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
3204                 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
3205         }
3206 
3207 
3208         cnt = tcp_hashinfo.ehash_mask + 1;
3209 
3210         tcp_death_row.sysctl_max_tw_buckets = cnt / 2;
3211         sysctl_tcp_max_orphans = cnt / 2;
3212         sysctl_max_syn_backlog = max(128, cnt / 256);
3213 
3214         tcp_init_mem();
3215         /* Set per-socket limits to no more than 1/128 the pressure threshold */
3216         limit = nr_free_buffer_pages() << (PAGE_SHIFT - 7);
3217         max_wshare = min(4UL*1024*1024, limit);
3218         max_rshare = min(6UL*1024*1024, limit);
3219 
3220         sysctl_tcp_wmem[0] = SK_MEM_QUANTUM;
3221         sysctl_tcp_wmem[1] = 16*1024;
3222         sysctl_tcp_wmem[2] = max(64*1024, max_wshare);
3223 
3224         sysctl_tcp_rmem[0] = SK_MEM_QUANTUM;
3225         sysctl_tcp_rmem[1] = 87380;
3226         sysctl_tcp_rmem[2] = max(87380, max_rshare);
3227 
3228         pr_info("Hash tables configured (established %u bind %u)\n",
3229                 tcp_hashinfo.ehash_mask + 1, tcp_hashinfo.bhash_size);
3230 
3231         tcp_metrics_init();
3232 
3233         tcp_register_congestion_control(&tcp_reno);
3234 
3235         tcp_tasklet_init();
3236 }
3237 
