
Linux/net/ipv4/tcp_ipv4.c

  1 /*
  2  * INET         An implementation of the TCP/IP protocol suite for the LINUX
  3  *              operating system.  INET is implemented using the  BSD Socket
  4  *              interface as the means of communication with the user level.
  5  *
  6  *              Implementation of the Transmission Control Protocol(TCP).
  7  *
  8  *              IPv4 specific functions
  9  *
 10  *
 11  *              code split from:
 12  *              linux/ipv4/tcp.c
 13  *              linux/ipv4/tcp_input.c
 14  *              linux/ipv4/tcp_output.c
 15  *
 16  *              See tcp.c for author information
 17  *
 18  *      This program is free software; you can redistribute it and/or
 19  *      modify it under the terms of the GNU General Public License
 20  *      as published by the Free Software Foundation; either version
 21  *      2 of the License, or (at your option) any later version.
 22  */
 23 
 24 /*
 25  * Changes:
 26  *              David S. Miller :       New socket lookup architecture.
 27  *                                      This code is dedicated to John Dyson.
 28  *              David S. Miller :       Change semantics of established hash,
 29  *                                      half is devoted to TIME_WAIT sockets
 30  *                                      and the rest go in the other half.
 31  *              Andi Kleen :            Add support for syncookies and fixed
 32  *                                      some bugs: ip options weren't passed to
 33  *                                      the TCP layer, missed a check for an
 34  *                                      ACK bit.
 35  *              Andi Kleen :            Implemented fast path mtu discovery.
 36  *                                      Fixed many serious bugs in the
 37  *                                      request_sock handling and moved
 38  *                                      most of it into the af independent code.
 39  *                                      Added tail drop and some other bugfixes.
 40  *                                      Added new listen semantics.
 41  *              Mike McLagan    :       Routing by source
 42  *      Juan Jose Ciarlante:            ip_dynaddr bits
 43  *              Andi Kleen:             various fixes.
 44  *      Vitaly E. Lavrov        :       Transparent proxy revived after year
 45  *                                      coma.
 46  *      Andi Kleen              :       Fix new listen.
 47  *      Andi Kleen              :       Fix accept error reporting.
 48  *      YOSHIFUJI Hideaki @USAGI and:   Support IPV6_V6ONLY socket option, which
 49  *      Alexey Kuznetsov                allow both IPv4 and IPv6 sockets to bind
 50  *                                      a single port at the same time.
 51  */
 52 
 53 #define pr_fmt(fmt) "TCP: " fmt
 54 
 55 #include <linux/bottom_half.h>
 56 #include <linux/types.h>
 57 #include <linux/fcntl.h>
 58 #include <linux/module.h>
 59 #include <linux/random.h>
 60 #include <linux/cache.h>
 61 #include <linux/jhash.h>
 62 #include <linux/init.h>
 63 #include <linux/times.h>
 64 #include <linux/slab.h>
 65 
 66 #include <net/net_namespace.h>
 67 #include <net/icmp.h>
 68 #include <net/inet_hashtables.h>
 69 #include <net/tcp.h>
 70 #include <net/transp_v6.h>
 71 #include <net/ipv6.h>
 72 #include <net/inet_common.h>
 73 #include <net/timewait_sock.h>
 74 #include <net/xfrm.h>
 75 #include <net/secure_seq.h>
 76 #include <net/tcp_memcontrol.h>
 77 #include <net/busy_poll.h>
 78 
 79 #include <linux/inet.h>
 80 #include <linux/ipv6.h>
 81 #include <linux/stddef.h>
 82 #include <linux/proc_fs.h>
 83 #include <linux/seq_file.h>
 84 
 85 #include <linux/crypto.h>
 86 #include <linux/scatterlist.h>
 87 
 88 int sysctl_tcp_tw_reuse __read_mostly;
 89 int sysctl_tcp_low_latency __read_mostly;
 90 EXPORT_SYMBOL(sysctl_tcp_low_latency);
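/* Editor's note: these two knobs are exposed to userspace as the sysctls
 * net.ipv4.tcp_tw_reuse and net.ipv4.tcp_low_latency (registered in
 * net/ipv4/sysctl_net_ipv4.c); the declarations above only hold their values.
 */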
 91 
 92 #ifdef CONFIG_TCP_MD5SIG
 93 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
 94                                __be32 daddr, __be32 saddr, const struct tcphdr *th);
 95 #endif
 96 
 97 struct inet_hashinfo tcp_hashinfo;
 98 EXPORT_SYMBOL(tcp_hashinfo);
 99 
100 static  __u32 tcp_v4_init_sequence(const struct sk_buff *skb)
101 {
102         return secure_tcp_sequence_number(ip_hdr(skb)->daddr,
103                                           ip_hdr(skb)->saddr,
104                                           tcp_hdr(skb)->dest,
105                                           tcp_hdr(skb)->source);
106 }
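/* Editor's note: secure_tcp_sequence_number() (net/core/secure_seq.c) derives
 * the initial sequence number from the connection 4-tuple keyed with a
 * boot-time secret plus a clock-driven offset, in the spirit of RFC 6528.
 */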
107 
108 int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp)
109 {
110         const struct tcp_timewait_sock *tcptw = tcp_twsk(sktw);
111         struct tcp_sock *tp = tcp_sk(sk);
112 
113         /* With PAWS, it is safe from the viewpoint
114            of data integrity. Even without PAWS it is safe provided sequence
115            spaces do not overlap i.e. at data rates <= 80Mbit/sec.
116 
117            Actually, the idea is close to VJ's: only the timestamp cache is
118            held not per host but per port pair, and the TW bucket is used as
119            the state holder.
120 
121            If the TW bucket has already been destroyed we fall back to VJ's
122            scheme and use the initial timestamp retrieved from the peer table.
123          */
124         if (tcptw->tw_ts_recent_stamp &&
125             (!twp || (sysctl_tcp_tw_reuse &&
126                              get_seconds() - tcptw->tw_ts_recent_stamp > 1))) {
127                 tp->write_seq = tcptw->tw_snd_nxt + 65535 + 2;
128                 if (tp->write_seq == 0)
129                         tp->write_seq = 1;
130                 tp->rx_opt.ts_recent       = tcptw->tw_ts_recent;
131                 tp->rx_opt.ts_recent_stamp = tcptw->tw_ts_recent_stamp;
132                 sock_hold(sktw);
133                 return 1;
134         }
135 
136         return 0;
137 }
138 EXPORT_SYMBOL_GPL(tcp_twsk_unique);
139 
140 /* This will initiate an outgoing connection. */
141 int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len)
142 {
143         struct sockaddr_in *usin = (struct sockaddr_in *)uaddr;
144         struct inet_sock *inet = inet_sk(sk);
145         struct tcp_sock *tp = tcp_sk(sk);
146         __be16 orig_sport, orig_dport;
147         __be32 daddr, nexthop;
148         struct flowi4 *fl4;
149         struct rtable *rt;
150         int err;
151         struct ip_options_rcu *inet_opt;
152 
153         if (addr_len < sizeof(struct sockaddr_in))
154                 return -EINVAL;
155 
156         if (usin->sin_family != AF_INET)
157                 return -EAFNOSUPPORT;
158 
159         nexthop = daddr = usin->sin_addr.s_addr;
160         inet_opt = rcu_dereference_protected(inet->inet_opt,
161                                              sock_owned_by_user(sk));
162         if (inet_opt && inet_opt->opt.srr) {
163                 if (!daddr)
164                         return -EINVAL;
165                 nexthop = inet_opt->opt.faddr;
166         }
167 
168         orig_sport = inet->inet_sport;
169         orig_dport = usin->sin_port;
170         fl4 = &inet->cork.fl.u.ip4;
171         rt = ip_route_connect(fl4, nexthop, inet->inet_saddr,
172                               RT_CONN_FLAGS(sk), sk->sk_bound_dev_if,
173                               IPPROTO_TCP,
174                               orig_sport, orig_dport, sk);
175         if (IS_ERR(rt)) {
176                 err = PTR_ERR(rt);
177                 if (err == -ENETUNREACH)
178                         IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
179                 return err;
180         }
181 
182         if (rt->rt_flags & (RTCF_MULTICAST | RTCF_BROADCAST)) {
183                 ip_rt_put(rt);
184                 return -ENETUNREACH;
185         }
186 
187         if (!inet_opt || !inet_opt->opt.srr)
188                 daddr = fl4->daddr;
189 
190         if (!inet->inet_saddr)
191                 inet->inet_saddr = fl4->saddr;
192         sk_rcv_saddr_set(sk, inet->inet_saddr);
193 
194         if (tp->rx_opt.ts_recent_stamp && inet->inet_daddr != daddr) {
195                 /* Reset inherited state */
196                 tp->rx_opt.ts_recent       = 0;
197                 tp->rx_opt.ts_recent_stamp = 0;
198                 if (likely(!tp->repair))
199                         tp->write_seq      = 0;
200         }
201 
202         if (tcp_death_row.sysctl_tw_recycle &&
203             !tp->rx_opt.ts_recent_stamp && fl4->daddr == daddr)
204                 tcp_fetch_timewait_stamp(sk, &rt->dst);
205 
206         inet->inet_dport = usin->sin_port;
207         sk_daddr_set(sk, daddr);
208 
209         inet_csk(sk)->icsk_ext_hdr_len = 0;
210         if (inet_opt)
211                 inet_csk(sk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
212 
213         tp->rx_opt.mss_clamp = TCP_MSS_DEFAULT;
214 
215         /* Socket identity is still unknown (sport may be zero).
216          * However we set state to SYN-SENT and, without releasing the socket
217          * lock, select a source port, enter ourselves into the hash tables and
218          * complete initialization after this.
219          */
220         tcp_set_state(sk, TCP_SYN_SENT);
221         err = inet_hash_connect(&tcp_death_row, sk);
222         if (err)
223                 goto failure;
224 
225         sk_set_txhash(sk);
226 
227         rt = ip_route_newports(fl4, rt, orig_sport, orig_dport,
228                                inet->inet_sport, inet->inet_dport, sk);
229         if (IS_ERR(rt)) {
230                 err = PTR_ERR(rt);
231                 rt = NULL;
232                 goto failure;
233         }
234         /* OK, now commit destination to socket.  */
235         sk->sk_gso_type = SKB_GSO_TCPV4;
236         sk_setup_caps(sk, &rt->dst);
237 
238         if (!tp->write_seq && likely(!tp->repair))
239                 tp->write_seq = secure_tcp_sequence_number(inet->inet_saddr,
240                                                            inet->inet_daddr,
241                                                            inet->inet_sport,
242                                                            usin->sin_port);
243 
244         inet->inet_id = tp->write_seq ^ jiffies;
245 
246         err = tcp_connect(sk);
247 
248         rt = NULL;
249         if (err)
250                 goto failure;
251 
252         return 0;
253 
254 failure:
255         /*
256          * This unhashes the socket and releases the local port,
257          * if necessary.
258          */
259         tcp_set_state(sk, TCP_CLOSE);
260         ip_rt_put(rt);
261         sk->sk_route_caps = 0;
262         inet->inet_dport = 0;
263         return err;
264 }
265 EXPORT_SYMBOL(tcp_v4_connect);
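/* Editor's note: a minimal userspace sketch (illustrative, not part of this
 * file) of the call that ends up in tcp_v4_connect() above; connect(2) on an
 * AF_INET SOCK_STREAM socket reaches it through inet_stream_connect().
 */
#include <arpa/inet.h>
#include <netinet/in.h>
#include <sys/socket.h>
#include <unistd.h>

static int tcp_connect_example(const char *ip, unsigned short port)
{
        struct sockaddr_in peer = { .sin_family = AF_INET, .sin_port = htons(port) };
        int fd;

        if (inet_pton(AF_INET, ip, &peer.sin_addr) != 1)
                return -1;
        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0)
                return -1;
        if (connect(fd, (struct sockaddr *)&peer, sizeof(peer)) < 0) {
                close(fd);
                return -1;
        }
        return fd;      /* caller owns the connected socket */
}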
266 
267 /*
268  * This routine reacts to ICMP_FRAG_NEEDED mtu indications as defined in RFC1191.
269  * It can be called through tcp_release_cb() if socket was owned by user
270  * at the time tcp_v4_err() was called to handle ICMP message.
271  */
272 void tcp_v4_mtu_reduced(struct sock *sk)
273 {
274         struct dst_entry *dst;
275         struct inet_sock *inet = inet_sk(sk);
276         u32 mtu = tcp_sk(sk)->mtu_info;
277 
278         dst = inet_csk_update_pmtu(sk, mtu);
279         if (!dst)
280                 return;
281 
282         /* Something is about to go wrong... Remember the soft error
283          * in case this connection is not able to recover.
284          */
285         if (mtu < dst_mtu(dst) && ip_dont_fragment(sk, dst))
286                 sk->sk_err_soft = EMSGSIZE;
287 
288         mtu = dst_mtu(dst);
289 
290         if (inet->pmtudisc != IP_PMTUDISC_DONT &&
291             ip_sk_accept_pmtu(sk) &&
292             inet_csk(sk)->icsk_pmtu_cookie > mtu) {
293                 tcp_sync_mss(sk, mtu);
294 
295                 /* Resend the TCP packet because it's
296                  * clear that the old packet has been
297                  * dropped. This is the new "fast" path mtu
298                  * discovery.
299                  */
300                 tcp_simple_retransmit(sk);
301         } /* else let the usual retransmit timer handle it */
302 }
303 EXPORT_SYMBOL(tcp_v4_mtu_reduced);
304 
305 static void do_redirect(struct sk_buff *skb, struct sock *sk)
306 {
307         struct dst_entry *dst = __sk_dst_check(sk, 0);
308 
309         if (dst)
310                 dst->ops->redirect(dst, sk, skb);
311 }
312 
313 
314 /* handle ICMP messages on TCP_NEW_SYN_RECV request sockets */
315 void tcp_req_err(struct sock *sk, u32 seq)
316 {
317         struct request_sock *req = inet_reqsk(sk);
318         struct net *net = sock_net(sk);
319 
320         /* ICMPs are not backlogged, hence we cannot get
321          * an established socket here.
322          */
323         WARN_ON(req->sk);
324 
325         if (seq != tcp_rsk(req)->snt_isn) {
326                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
327         } else {
328                 /*
329                  * Still in SYN_RECV, just remove it silently.
330                  * There is no good way to pass the error to the newly
331                  * created socket, and POSIX does not want network
332                  * errors returned from accept().
333                  */
334                 inet_csk_reqsk_queue_drop(req->rsk_listener, req);
335                 NET_INC_STATS_BH(net, LINUX_MIB_LISTENDROPS);
336         }
337         reqsk_put(req);
338 }
339 EXPORT_SYMBOL(tcp_req_err);
340 
341 /*
342  * This routine is called by the ICMP module when it gets some
343  * sort of error condition.  If err < 0 then the socket should
344  * be closed and the error returned to the user.  If err > 0
345  * it's just the icmp type << 8 | icmp code.  After adjustment
346  * header points to the first 8 bytes of the tcp header.  We need
347  * to find the appropriate port.
348  *
349  * The locking strategy used here is very "optimistic". When
350  * someone else accesses the socket the ICMP is just dropped
351  * and for some paths there is no check at all.
352  * A more general error queue to queue errors for later handling
353  * is probably better.
354  *
355  */
356 
357 void tcp_v4_err(struct sk_buff *icmp_skb, u32 info)
358 {
359         const struct iphdr *iph = (const struct iphdr *)icmp_skb->data;
360         struct tcphdr *th = (struct tcphdr *)(icmp_skb->data + (iph->ihl << 2));
361         struct inet_connection_sock *icsk;
362         struct tcp_sock *tp;
363         struct inet_sock *inet;
364         const int type = icmp_hdr(icmp_skb)->type;
365         const int code = icmp_hdr(icmp_skb)->code;
366         struct sock *sk;
367         struct sk_buff *skb;
368         struct request_sock *fastopen;
369         __u32 seq, snd_una;
370         __u32 remaining;
371         int err;
372         struct net *net = dev_net(icmp_skb->dev);
373 
374         sk = __inet_lookup_established(net, &tcp_hashinfo, iph->daddr,
375                                        th->dest, iph->saddr, ntohs(th->source),
376                                        inet_iif(icmp_skb));
377         if (!sk) {
378                 ICMP_INC_STATS_BH(net, ICMP_MIB_INERRORS);
379                 return;
380         }
381         if (sk->sk_state == TCP_TIME_WAIT) {
382                 inet_twsk_put(inet_twsk(sk));
383                 return;
384         }
385         seq = ntohl(th->seq);
386         if (sk->sk_state == TCP_NEW_SYN_RECV)
387                 return tcp_req_err(sk, seq);
388 
389         bh_lock_sock(sk);
390         /* If too many ICMPs get dropped on busy
391          * servers this needs to be solved differently.
392          * We do take care of PMTU discovery (RFC1191) special case :
393          * we can receive locally generated ICMP messages while socket is held.
394          */
395         if (sock_owned_by_user(sk)) {
396                 if (!(type == ICMP_DEST_UNREACH && code == ICMP_FRAG_NEEDED))
397                         NET_INC_STATS_BH(net, LINUX_MIB_LOCKDROPPEDICMPS);
398         }
399         if (sk->sk_state == TCP_CLOSE)
400                 goto out;
401 
402         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
403                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
404                 goto out;
405         }
406 
407         icsk = inet_csk(sk);
408         tp = tcp_sk(sk);
409         /* XXX (TFO) - tp->snd_una should be ISN (tcp_create_openreq_child() */
410         fastopen = tp->fastopen_rsk;
411         snd_una = fastopen ? tcp_rsk(fastopen)->snt_isn : tp->snd_una;
412         if (sk->sk_state != TCP_LISTEN &&
413             !between(seq, snd_una, tp->snd_nxt)) {
414                 NET_INC_STATS_BH(net, LINUX_MIB_OUTOFWINDOWICMPS);
415                 goto out;
416         }
417 
418         switch (type) {
419         case ICMP_REDIRECT:
420                 do_redirect(icmp_skb, sk);
421                 goto out;
422         case ICMP_SOURCE_QUENCH:
423                 /* Just silently ignore these. */
424                 goto out;
425         case ICMP_PARAMETERPROB:
426                 err = EPROTO;
427                 break;
428         case ICMP_DEST_UNREACH:
429                 if (code > NR_ICMP_UNREACH)
430                         goto out;
431 
432                 if (code == ICMP_FRAG_NEEDED) { /* PMTU discovery (RFC1191) */
433                         /* We are not interested in TCP_LISTEN and open_requests
434                          * (SYN-ACKs sent out by Linux are always < 576 bytes so
435                          * they should go through unfragmented).
436                          */
437                         if (sk->sk_state == TCP_LISTEN)
438                                 goto out;
439 
440                         tp->mtu_info = info;
441                         if (!sock_owned_by_user(sk)) {
442                                 tcp_v4_mtu_reduced(sk);
443                         } else {
444                                 if (!test_and_set_bit(TCP_MTU_REDUCED_DEFERRED, &tp->tsq_flags))
445                                         sock_hold(sk);
446                         }
447                         goto out;
448                 }
449 
450                 err = icmp_err_convert[code].errno;
451                 /* check if icmp_skb allows revert of backoff
452                  * (see draft-zimmermann-tcp-lcd) */
453                 if (code != ICMP_NET_UNREACH && code != ICMP_HOST_UNREACH)
454                         break;
455                 if (seq != tp->snd_una  || !icsk->icsk_retransmits ||
456                     !icsk->icsk_backoff || fastopen)
457                         break;
458 
459                 if (sock_owned_by_user(sk))
460                         break;
461 
462                 icsk->icsk_backoff--;
463                 icsk->icsk_rto = tp->srtt_us ? __tcp_set_rto(tp) :
464                                                TCP_TIMEOUT_INIT;
465                 icsk->icsk_rto = inet_csk_rto_backoff(icsk, TCP_RTO_MAX);
466 
467                 skb = tcp_write_queue_head(sk);
468                 BUG_ON(!skb);
469 
470                 remaining = icsk->icsk_rto -
471                             min(icsk->icsk_rto,
472                                 tcp_time_stamp - tcp_skb_timestamp(skb));
473 
474                 if (remaining) {
475                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_RETRANS,
476                                                   remaining, TCP_RTO_MAX);
477                 } else {
478                         /* RTO revert clocked out retransmission.
479                          * Will retransmit now */
480                         tcp_retransmit_timer(sk);
481                 }
482 
483                 break;
484         case ICMP_TIME_EXCEEDED:
485                 err = EHOSTUNREACH;
486                 break;
487         default:
488                 goto out;
489         }
490 
491         switch (sk->sk_state) {
492         case TCP_SYN_SENT:
493         case TCP_SYN_RECV:
494                 /* Only in fast or simultaneous open. If a fast open socket
495                  * is already accepted it is treated as a connected one below.
496                  */
497                 if (fastopen && !fastopen->sk)
498                         break;
499 
500                 if (!sock_owned_by_user(sk)) {
501                         sk->sk_err = err;
502 
503                         sk->sk_error_report(sk);
504 
505                         tcp_done(sk);
506                 } else {
507                         sk->sk_err_soft = err;
508                 }
509                 goto out;
510         }
511 
512         /* If we've already connected we will keep trying
513          * until we time out, or the user gives up.
514          *
515          * rfc1122 4.2.3.9 allows us to consider as hard errors
516          * only PROTO_UNREACH and PORT_UNREACH (well, FRAG_FAILED too,
517          * but it is obsoleted by pmtu discovery).
518          *
519          * Note that in the modern internet, where routing is unreliable
520          * and broken firewalls sit in every dark corner, sending random
521          * errors ordered by their masters, even these two messages finally lose
522          * their original sense (even Linux sends invalid PORT_UNREACHs).
523          *
524          * Now we are in compliance with RFCs.
525          *                                                      --ANK (980905)
526          */
527 
528         inet = inet_sk(sk);
529         if (!sock_owned_by_user(sk) && inet->recverr) {
530                 sk->sk_err = err;
531                 sk->sk_error_report(sk);
532         } else  { /* Only an error on timeout */
533                 sk->sk_err_soft = err;
534         }
535 
536 out:
537         bh_unlock_sock(sk);
538         sock_put(sk);
539 }
540 
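/* Editor's note: with CHECKSUM_PARTIAL the NIC finishes the checksum, so only
 * the pseudo-header sum is stored and csum_start/csum_offset tell the device
 * where the checksum field lives; otherwise the full TCP checksum is computed
 * in software over the header and payload below.
 */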
541 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr)
542 {
543         struct tcphdr *th = tcp_hdr(skb);
544 
545         if (skb->ip_summed == CHECKSUM_PARTIAL) {
546                 th->check = ~tcp_v4_check(skb->len, saddr, daddr, 0);
547                 skb->csum_start = skb_transport_header(skb) - skb->head;
548                 skb->csum_offset = offsetof(struct tcphdr, check);
549         } else {
550                 th->check = tcp_v4_check(skb->len, saddr, daddr,
551                                          csum_partial(th,
552                                                       th->doff << 2,
553                                                       skb->csum));
554         }
555 }
556 
557 /* This routine computes an IPv4 TCP checksum. */
558 void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb)
559 {
560         const struct inet_sock *inet = inet_sk(sk);
561 
562         __tcp_v4_send_check(skb, inet->inet_saddr, inet->inet_daddr);
563 }
564 EXPORT_SYMBOL(tcp_v4_send_check);
565 
566 /*
567  *      This routine will send an RST to the other tcp.
568  *
569  *      Someone asks: why do I NEVER use socket parameters (TOS, TTL etc.)
570  *                    for the reset?
571  *      Answer: if a packet caused the RST, it is not for a socket
572  *              existing in our system; if it is matched to a socket,
573  *              it is just a duplicate segment or a bug in the other side's TCP.
574  *              So we build the reply based only on the parameters that
575  *              arrived with the segment.
576  *      Exception: precedence violation. We do not implement it in any case.
577  */
578 
579 static void tcp_v4_send_reset(const struct sock *sk, struct sk_buff *skb)
580 {
581         const struct tcphdr *th = tcp_hdr(skb);
582         struct {
583                 struct tcphdr th;
584 #ifdef CONFIG_TCP_MD5SIG
585                 __be32 opt[(TCPOLEN_MD5SIG_ALIGNED >> 2)];
586 #endif
587         } rep;
588         struct ip_reply_arg arg;
589 #ifdef CONFIG_TCP_MD5SIG
590         struct tcp_md5sig_key *key;
591         const __u8 *hash_location = NULL;
592         unsigned char newhash[16];
593         int genhash;
594         struct sock *sk1 = NULL;
595 #endif
596         struct net *net;
597 
598         /* Never send a reset in response to a reset. */
599         if (th->rst)
600                 return;
601 
602         /* If sk is not NULL, it means we did a successful lookup and the incoming
603          * route had to be correct. The prequeue might have dropped our dst.
604          */
605         if (!sk && skb_rtable(skb)->rt_type != RTN_LOCAL)
606                 return;
607 
608         /* Swap the send and the receive. */
609         memset(&rep, 0, sizeof(rep));
610         rep.th.dest   = th->source;
611         rep.th.source = th->dest;
612         rep.th.doff   = sizeof(struct tcphdr) / 4;
613         rep.th.rst    = 1;
614 
615         if (th->ack) {
616                 rep.th.seq = th->ack_seq;
617         } else {
618                 rep.th.ack = 1;
619                 rep.th.ack_seq = htonl(ntohl(th->seq) + th->syn + th->fin +
620                                        skb->len - (th->doff << 2));
621         }
622 
623         memset(&arg, 0, sizeof(arg));
624         arg.iov[0].iov_base = (unsigned char *)&rep;
625         arg.iov[0].iov_len  = sizeof(rep.th);
626 
627         net = sk ? sock_net(sk) : dev_net(skb_dst(skb)->dev);
628 #ifdef CONFIG_TCP_MD5SIG
629         hash_location = tcp_parse_md5sig_option(th);
630         if (!sk && hash_location) {
631                 /*
632                  * The active side is lost. Try to find the listening socket through
633                  * the source port, and then find the md5 key through that listening socket.
634                  * We do not lose any security here:
635                  * the incoming packet is checked with the md5 hash of the found key, and
636                  * no RST is generated if the md5 hash doesn't match.
637                  */
638                 sk1 = __inet_lookup_listener(net,
639                                              &tcp_hashinfo, ip_hdr(skb)->saddr,
640                                              th->source, ip_hdr(skb)->daddr,
641                                              ntohs(th->source), inet_iif(skb));
642                 /* don't send rst if it can't find key */
643                 if (!sk1)
644                         return;
645                 rcu_read_lock();
646                 key = tcp_md5_do_lookup(sk1, (union tcp_md5_addr *)
647                                         &ip_hdr(skb)->saddr, AF_INET);
648                 if (!key)
649                         goto release_sk1;
650 
651                 genhash = tcp_v4_md5_hash_skb(newhash, key, NULL, skb);
652                 if (genhash || memcmp(hash_location, newhash, 16) != 0)
653                         goto release_sk1;
654         } else {
655                 key = sk ? tcp_md5_do_lookup(sk, (union tcp_md5_addr *)
656                                              &ip_hdr(skb)->saddr,
657                                              AF_INET) : NULL;
658         }
659 
660         if (key) {
661                 rep.opt[0] = htonl((TCPOPT_NOP << 24) |
662                                    (TCPOPT_NOP << 16) |
663                                    (TCPOPT_MD5SIG << 8) |
664                                    TCPOLEN_MD5SIG);
665                 /* Update length and the length the header thinks exists */
666                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
667                 rep.th.doff = arg.iov[0].iov_len / 4;
668 
669                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[1],
670                                      key, ip_hdr(skb)->saddr,
671                                      ip_hdr(skb)->daddr, &rep.th);
672         }
673 #endif
674         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
675                                       ip_hdr(skb)->saddr, /* XXX */
676                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
677         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
678         arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0;
679         /* When the socket is gone, all binding information is lost.
680          * Routing might fail in this case. No choice here: if we force the
681          * input interface, we will misroute in the case of an asymmetric route.
682          */
683         if (sk)
684                 arg.bound_dev_if = sk->sk_bound_dev_if;
685 
686         arg.tos = ip_hdr(skb)->tos;
687         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
688                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
689                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
690                               &arg, arg.iov[0].iov_len);
691 
692         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
693         TCP_INC_STATS_BH(net, TCP_MIB_OUTRSTS);
694 
695 #ifdef CONFIG_TCP_MD5SIG
696 release_sk1:
697         if (sk1) {
698                 rcu_read_unlock();
699                 sock_put(sk1);
700         }
701 #endif
702 }
703 
704 /* The code below, which sends ACKs in SYN-RECV and TIME-WAIT states
705    outside socket context, is certainly ugly. What can I do?
706  */
707 
708 static void tcp_v4_send_ack(struct sk_buff *skb, u32 seq, u32 ack,
709                             u32 win, u32 tsval, u32 tsecr, int oif,
710                             struct tcp_md5sig_key *key,
711                             int reply_flags, u8 tos)
712 {
713         const struct tcphdr *th = tcp_hdr(skb);
714         struct {
715                 struct tcphdr th;
716                 __be32 opt[(TCPOLEN_TSTAMP_ALIGNED >> 2)
717 #ifdef CONFIG_TCP_MD5SIG
718                            + (TCPOLEN_MD5SIG_ALIGNED >> 2)
719 #endif
720                         ];
721         } rep;
722         struct ip_reply_arg arg;
723         struct net *net = dev_net(skb_dst(skb)->dev);
724 
725         memset(&rep.th, 0, sizeof(struct tcphdr));
726         memset(&arg, 0, sizeof(arg));
727 
728         arg.iov[0].iov_base = (unsigned char *)&rep;
729         arg.iov[0].iov_len  = sizeof(rep.th);
730         if (tsecr) {
731                 rep.opt[0] = htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
732                                    (TCPOPT_TIMESTAMP << 8) |
733                                    TCPOLEN_TIMESTAMP);
734                 rep.opt[1] = htonl(tsval);
735                 rep.opt[2] = htonl(tsecr);
736                 arg.iov[0].iov_len += TCPOLEN_TSTAMP_ALIGNED;
737         }
738 
739         /* Swap the send and the receive. */
740         rep.th.dest    = th->source;
741         rep.th.source  = th->dest;
742         rep.th.doff    = arg.iov[0].iov_len / 4;
743         rep.th.seq     = htonl(seq);
744         rep.th.ack_seq = htonl(ack);
745         rep.th.ack     = 1;
746         rep.th.window  = htons(win);
747 
748 #ifdef CONFIG_TCP_MD5SIG
749         if (key) {
750                 int offset = (tsecr) ? 3 : 0;
751 
752                 rep.opt[offset++] = htonl((TCPOPT_NOP << 24) |
753                                           (TCPOPT_NOP << 16) |
754                                           (TCPOPT_MD5SIG << 8) |
755                                           TCPOLEN_MD5SIG);
756                 arg.iov[0].iov_len += TCPOLEN_MD5SIG_ALIGNED;
757                 rep.th.doff = arg.iov[0].iov_len/4;
758 
759                 tcp_v4_md5_hash_hdr((__u8 *) &rep.opt[offset],
760                                     key, ip_hdr(skb)->saddr,
761                                     ip_hdr(skb)->daddr, &rep.th);
762         }
763 #endif
764         arg.flags = reply_flags;
765         arg.csum = csum_tcpudp_nofold(ip_hdr(skb)->daddr,
766                                       ip_hdr(skb)->saddr, /* XXX */
767                                       arg.iov[0].iov_len, IPPROTO_TCP, 0);
768         arg.csumoffset = offsetof(struct tcphdr, check) / 2;
769         if (oif)
770                 arg.bound_dev_if = oif;
771         arg.tos = tos;
772         ip_send_unicast_reply(*this_cpu_ptr(net->ipv4.tcp_sk),
773                               skb, &TCP_SKB_CB(skb)->header.h4.opt,
774                               ip_hdr(skb)->saddr, ip_hdr(skb)->daddr,
775                               &arg, arg.iov[0].iov_len);
776 
777         TCP_INC_STATS_BH(net, TCP_MIB_OUTSEGS);
778 }
779 
780 static void tcp_v4_timewait_ack(struct sock *sk, struct sk_buff *skb)
781 {
782         struct inet_timewait_sock *tw = inet_twsk(sk);
783         struct tcp_timewait_sock *tcptw = tcp_twsk(sk);
784 
785         tcp_v4_send_ack(skb, tcptw->tw_snd_nxt, tcptw->tw_rcv_nxt,
786                         tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
787                         tcp_time_stamp + tcptw->tw_ts_offset,
788                         tcptw->tw_ts_recent,
789                         tw->tw_bound_dev_if,
790                         tcp_twsk_md5_key(tcptw),
791                         tw->tw_transparent ? IP_REPLY_ARG_NOSRCCHECK : 0,
792                         tw->tw_tos
793                         );
794 
795         inet_twsk_put(tw);
796 }
797 
798 static void tcp_v4_reqsk_send_ack(const struct sock *sk, struct sk_buff *skb,
799                                   struct request_sock *req)
800 {
801         /* sk->sk_state == TCP_LISTEN -> for regular TCP_SYN_RECV
802          * sk->sk_state == TCP_SYN_RECV -> for Fast Open.
803          */
804         tcp_v4_send_ack(skb, (sk->sk_state == TCP_LISTEN) ?
805                         tcp_rsk(req)->snt_isn + 1 : tcp_sk(sk)->snd_nxt,
806                         tcp_rsk(req)->rcv_nxt, req->rsk_rcv_wnd,
807                         tcp_time_stamp,
808                         req->ts_recent,
809                         0,
810                         tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&ip_hdr(skb)->daddr,
811                                           AF_INET),
812                         inet_rsk(req)->no_srccheck ? IP_REPLY_ARG_NOSRCCHECK : 0,
813                         ip_hdr(skb)->tos);
814 }
815 
816 /*
817  *      Send a SYN-ACK after having received a SYN.
818  *      This still operates on a request_sock only, not on a big
819  *      socket.
820  */
821 static int tcp_v4_send_synack(const struct sock *sk, struct dst_entry *dst,
822                               struct flowi *fl,
823                               struct request_sock *req,
824                               struct tcp_fastopen_cookie *foc,
825                                   bool attach_req)
826 {
827         const struct inet_request_sock *ireq = inet_rsk(req);
828         struct flowi4 fl4;
829         int err = -1;
830         struct sk_buff *skb;
831 
832         /* First, grab a route. */
833         if (!dst && (dst = inet_csk_route_req(sk, &fl4, req)) == NULL)
834                 return -1;
835 
836         skb = tcp_make_synack(sk, dst, req, foc, attach_req);
837 
838         if (skb) {
839                 __tcp_v4_send_check(skb, ireq->ir_loc_addr, ireq->ir_rmt_addr);
840 
841                 err = ip_build_and_send_pkt(skb, sk, ireq->ir_loc_addr,
842                                             ireq->ir_rmt_addr,
843                                             ireq->opt);
844                 err = net_xmit_eval(err);
845         }
846 
847         return err;
848 }
849 
850 /*
851  *      IPv4 request_sock destructor.
852  */
853 static void tcp_v4_reqsk_destructor(struct request_sock *req)
854 {
855         kfree(inet_rsk(req)->opt);
856 }
857 
858 
859 #ifdef CONFIG_TCP_MD5SIG
860 /*
861  * RFC2385 MD5 checksumming requires a mapping of
862  * IP address->MD5 Key.
863  * We need to maintain these in the sk structure.
864  */
865 
866 /* Find the Key structure for an address.  */
867 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
868                                          const union tcp_md5_addr *addr,
869                                          int family)
870 {
871         const struct tcp_sock *tp = tcp_sk(sk);
872         struct tcp_md5sig_key *key;
873         unsigned int size = sizeof(struct in_addr);
874         const struct tcp_md5sig_info *md5sig;
875 
876         /* caller either holds rcu_read_lock() or socket lock */
877         md5sig = rcu_dereference_check(tp->md5sig_info,
878                                        sock_owned_by_user(sk) ||
879                                        lockdep_is_held((spinlock_t *)&sk->sk_lock.slock));
880         if (!md5sig)
881                 return NULL;
882 #if IS_ENABLED(CONFIG_IPV6)
883         if (family == AF_INET6)
884                 size = sizeof(struct in6_addr);
885 #endif
886         hlist_for_each_entry_rcu(key, &md5sig->head, node) {
887                 if (key->family != family)
888                         continue;
889                 if (!memcmp(&key->addr, addr, size))
890                         return key;
891         }
892         return NULL;
893 }
894 EXPORT_SYMBOL(tcp_md5_do_lookup);
895 
896 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
897                                          const struct sock *addr_sk)
898 {
899         const union tcp_md5_addr *addr;
900 
901         addr = (const union tcp_md5_addr *)&addr_sk->sk_daddr;
902         return tcp_md5_do_lookup(sk, addr, AF_INET);
903 }
904 EXPORT_SYMBOL(tcp_v4_md5_lookup);
905 
906 /* This can be called on a newly created socket, from other files */
907 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
908                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp)
909 {
910         /* Add Key to the list */
911         struct tcp_md5sig_key *key;
912         struct tcp_sock *tp = tcp_sk(sk);
913         struct tcp_md5sig_info *md5sig;
914 
915         key = tcp_md5_do_lookup(sk, addr, family);
916         if (key) {
917                 /* Pre-existing entry - just update that one. */
918                 memcpy(key->key, newkey, newkeylen);
919                 key->keylen = newkeylen;
920                 return 0;
921         }
922 
923         md5sig = rcu_dereference_protected(tp->md5sig_info,
924                                            sock_owned_by_user(sk) ||
925                                            lockdep_is_held(&sk->sk_lock.slock));
926         if (!md5sig) {
927                 md5sig = kmalloc(sizeof(*md5sig), gfp);
928                 if (!md5sig)
929                         return -ENOMEM;
930 
931                 sk_nocaps_add(sk, NETIF_F_GSO_MASK);
932                 INIT_HLIST_HEAD(&md5sig->head);
933                 rcu_assign_pointer(tp->md5sig_info, md5sig);
934         }
935 
936         key = sock_kmalloc(sk, sizeof(*key), gfp);
937         if (!key)
938                 return -ENOMEM;
939         if (!tcp_alloc_md5sig_pool()) {
940                 sock_kfree_s(sk, key, sizeof(*key));
941                 return -ENOMEM;
942         }
943 
944         memcpy(key->key, newkey, newkeylen);
945         key->keylen = newkeylen;
946         key->family = family;
947         memcpy(&key->addr, addr,
948                (family == AF_INET6) ? sizeof(struct in6_addr) :
949                                       sizeof(struct in_addr));
950         hlist_add_head_rcu(&key->node, &md5sig->head);
951         return 0;
952 }
953 EXPORT_SYMBOL(tcp_md5_do_add);
954 
955 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr, int family)
956 {
957         struct tcp_md5sig_key *key;
958 
959         key = tcp_md5_do_lookup(sk, addr, family);
960         if (!key)
961                 return -ENOENT;
962         hlist_del_rcu(&key->node);
963         atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
964         kfree_rcu(key, rcu);
965         return 0;
966 }
967 EXPORT_SYMBOL(tcp_md5_do_del);
968 
969 static void tcp_clear_md5_list(struct sock *sk)
970 {
971         struct tcp_sock *tp = tcp_sk(sk);
972         struct tcp_md5sig_key *key;
973         struct hlist_node *n;
974         struct tcp_md5sig_info *md5sig;
975 
976         md5sig = rcu_dereference_protected(tp->md5sig_info, 1);
977 
978         hlist_for_each_entry_safe(key, n, &md5sig->head, node) {
979                 hlist_del_rcu(&key->node);
980                 atomic_sub(sizeof(*key), &sk->sk_omem_alloc);
981                 kfree_rcu(key, rcu);
982         }
983 }
984 
985 static int tcp_v4_parse_md5_keys(struct sock *sk, char __user *optval,
986                                  int optlen)
987 {
988         struct tcp_md5sig cmd;
989         struct sockaddr_in *sin = (struct sockaddr_in *)&cmd.tcpm_addr;
990 
991         if (optlen < sizeof(cmd))
992                 return -EINVAL;
993 
994         if (copy_from_user(&cmd, optval, sizeof(cmd)))
995                 return -EFAULT;
996 
997         if (sin->sin_family != AF_INET)
998                 return -EINVAL;
999 
1000         if (!cmd.tcpm_keylen)
1001                 return tcp_md5_do_del(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1002                                       AF_INET);
1003 
1004         if (cmd.tcpm_keylen > TCP_MD5SIG_MAXKEYLEN)
1005                 return -EINVAL;
1006 
1007         return tcp_md5_do_add(sk, (union tcp_md5_addr *)&sin->sin_addr.s_addr,
1008                               AF_INET, cmd.tcpm_key, cmd.tcpm_keylen,
1009                               GFP_KERNEL);
1010 }
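/* Editor's note: a minimal userspace sketch (illustrative, not part of this
 * file) of the setsockopt(TCP_MD5SIG) request parsed above, as used e.g. by
 * BGP daemons; it assumes <netinet/tcp.h> provides struct tcp_md5sig and
 * TCP_MD5SIG_MAXKEYLEN.
 */
#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int set_tcp_md5_key(int fd, const struct sockaddr_in *peer,
                           const void *key, unsigned int keylen)
{
        struct tcp_md5sig md5;

        if (keylen > TCP_MD5SIG_MAXKEYLEN)
                return -1;
        memset(&md5, 0, sizeof(md5));
        memcpy(&md5.tcpm_addr, peer, sizeof(*peer));    /* AF_INET peer */
        md5.tcpm_keylen = keylen;
        memcpy(md5.tcpm_key, key, keylen);
        return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}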
1011 
1012 static int tcp_v4_md5_hash_pseudoheader(struct tcp_md5sig_pool *hp,
1013                                         __be32 daddr, __be32 saddr, int nbytes)
1014 {
1015         struct tcp4_pseudohdr *bp;
1016         struct scatterlist sg;
1017 
1018         bp = &hp->md5_blk.ip4;
1019 
1020         /*
1021          * 1. the TCP pseudo-header (in the order: source IP address,
1022          * destination IP address, zero-padded protocol number, and
1023          * segment length)
1024          */
1025         bp->saddr = saddr;
1026         bp->daddr = daddr;
1027         bp->pad = 0;
1028         bp->protocol = IPPROTO_TCP;
1029         bp->len = cpu_to_be16(nbytes);
1030 
1031         sg_init_one(&sg, bp, sizeof(*bp));
1032         return crypto_hash_update(&hp->md5_desc, &sg, sizeof(*bp));
1033 }
1034 
1035 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
1036                                __be32 daddr, __be32 saddr, const struct tcphdr *th)
1037 {
1038         struct tcp_md5sig_pool *hp;
1039         struct hash_desc *desc;
1040 
1041         hp = tcp_get_md5sig_pool();
1042         if (!hp)
1043                 goto clear_hash_noput;
1044         desc = &hp->md5_desc;
1045 
1046         if (crypto_hash_init(desc))
1047                 goto clear_hash;
1048         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, th->doff << 2))
1049                 goto clear_hash;
1050         if (tcp_md5_hash_header(hp, th))
1051                 goto clear_hash;
1052         if (tcp_md5_hash_key(hp, key))
1053                 goto clear_hash;
1054         if (crypto_hash_final(desc, md5_hash))
1055                 goto clear_hash;
1056 
1057         tcp_put_md5sig_pool();
1058         return 0;
1059 
1060 clear_hash:
1061         tcp_put_md5sig_pool();
1062 clear_hash_noput:
1063         memset(md5_hash, 0, 16);
1064         return 1;
1065 }
1066 
1067 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1068                         const struct sock *sk,
1069                         const struct sk_buff *skb)
1070 {
1071         struct tcp_md5sig_pool *hp;
1072         struct hash_desc *desc;
1073         const struct tcphdr *th = tcp_hdr(skb);
1074         __be32 saddr, daddr;
1075 
1076         if (sk) { /* valid for establish/request sockets */
1077                 saddr = sk->sk_rcv_saddr;
1078                 daddr = sk->sk_daddr;
1079         } else {
1080                 const struct iphdr *iph = ip_hdr(skb);
1081                 saddr = iph->saddr;
1082                 daddr = iph->daddr;
1083         }
1084 
1085         hp = tcp_get_md5sig_pool();
1086         if (!hp)
1087                 goto clear_hash_noput;
1088         desc = &hp->md5_desc;
1089 
1090         if (crypto_hash_init(desc))
1091                 goto clear_hash;
1092 
1093         if (tcp_v4_md5_hash_pseudoheader(hp, daddr, saddr, skb->len))
1094                 goto clear_hash;
1095         if (tcp_md5_hash_header(hp, th))
1096                 goto clear_hash;
1097         if (tcp_md5_hash_skb_data(hp, skb, th->doff << 2))
1098                 goto clear_hash;
1099         if (tcp_md5_hash_key(hp, key))
1100                 goto clear_hash;
1101         if (crypto_hash_final(desc, md5_hash))
1102                 goto clear_hash;
1103 
1104         tcp_put_md5sig_pool();
1105         return 0;
1106 
1107 clear_hash:
1108         tcp_put_md5sig_pool();
1109 clear_hash_noput:
1110         memset(md5_hash, 0, 16);
1111         return 1;
1112 }
1113 EXPORT_SYMBOL(tcp_v4_md5_hash_skb);
1114 
1115 #endif
1116 
1117 /* Called with rcu_read_lock() */
1118 static bool tcp_v4_inbound_md5_hash(const struct sock *sk,
1119                                     const struct sk_buff *skb)
1120 {
1121 #ifdef CONFIG_TCP_MD5SIG
1122         /*
1123          * This gets called for each TCP segment that arrives
1124          * so we want to be efficient.
1125          * We have 3 drop cases:
1126          * o No MD5 hash and one expected.
1127          * o MD5 hash and we're not expecting one.
1128          * o MD5 hash and it's wrong.
1129          */
1130         const __u8 *hash_location = NULL;
1131         struct tcp_md5sig_key *hash_expected;
1132         const struct iphdr *iph = ip_hdr(skb);
1133         const struct tcphdr *th = tcp_hdr(skb);
1134         int genhash;
1135         unsigned char newhash[16];
1136 
1137         hash_expected = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&iph->saddr,
1138                                           AF_INET);
1139         hash_location = tcp_parse_md5sig_option(th);
1140 
1141         /* We've parsed the options - do we have a hash? */
1142         if (!hash_expected && !hash_location)
1143                 return false;
1144 
1145         if (hash_expected && !hash_location) {
1146                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5NOTFOUND);
1147                 return true;
1148         }
1149 
1150         if (!hash_expected && hash_location) {
1151                 NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPMD5UNEXPECTED);
1152                 return true;
1153         }
1154 
1155         /* Okay, so this is hash_expected and hash_location -
1156          * so we need to calculate the checksum.
1157          */
1158         genhash = tcp_v4_md5_hash_skb(newhash,
1159                                       hash_expected,
1160                                       NULL, skb);
1161 
1162         if (genhash || memcmp(hash_location, newhash, 16) != 0) {
1163                 net_info_ratelimited("MD5 Hash failed for (%pI4, %d)->(%pI4, %d)%s\n",
1164                                      &iph->saddr, ntohs(th->source),
1165                                      &iph->daddr, ntohs(th->dest),
1166                                      genhash ? " tcp_v4_calc_md5_hash failed"
1167                                      : "");
1168                 return true;
1169         }
1170         return false;
1171 #endif
1172         return false;
1173 }
1174 
1175 static void tcp_v4_init_req(struct request_sock *req,
1176                             const struct sock *sk_listener,
1177                             struct sk_buff *skb)
1178 {
1179         struct inet_request_sock *ireq = inet_rsk(req);
1180 
1181         sk_rcv_saddr_set(req_to_sk(req), ip_hdr(skb)->daddr);
1182         sk_daddr_set(req_to_sk(req), ip_hdr(skb)->saddr);
1183         ireq->no_srccheck = inet_sk(sk_listener)->transparent;
1184         ireq->opt = tcp_v4_save_options(skb);
1185 }
1186 
1187 static struct dst_entry *tcp_v4_route_req(const struct sock *sk,
1188                                           struct flowi *fl,
1189                                           const struct request_sock *req,
1190                                           bool *strict)
1191 {
1192         struct dst_entry *dst = inet_csk_route_req(sk, &fl->u.ip4, req);
1193 
1194         if (strict) {
1195                 if (fl->u.ip4.daddr == inet_rsk(req)->ir_rmt_addr)
1196                         *strict = true;
1197                 else
1198                         *strict = false;
1199         }
1200 
1201         return dst;
1202 }
1203 
1204 struct request_sock_ops tcp_request_sock_ops __read_mostly = {
1205         .family         =       PF_INET,
1206         .obj_size       =       sizeof(struct tcp_request_sock),
1207         .rtx_syn_ack    =       tcp_rtx_synack,
1208         .send_ack       =       tcp_v4_reqsk_send_ack,
1209         .destructor     =       tcp_v4_reqsk_destructor,
1210         .send_reset     =       tcp_v4_send_reset,
1211         .syn_ack_timeout =      tcp_syn_ack_timeout,
1212 };
1213 
1214 static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
1215         .mss_clamp      =       TCP_MSS_DEFAULT,
1216 #ifdef CONFIG_TCP_MD5SIG
1217         .req_md5_lookup =       tcp_v4_md5_lookup,
1218         .calc_md5_hash  =       tcp_v4_md5_hash_skb,
1219 #endif
1220         .init_req       =       tcp_v4_init_req,
1221 #ifdef CONFIG_SYN_COOKIES
1222         .cookie_init_seq =      cookie_v4_init_sequence,
1223 #endif
1224         .route_req      =       tcp_v4_route_req,
1225         .init_seq       =       tcp_v4_init_sequence,
1226         .send_synack    =       tcp_v4_send_synack,
1227 };
1228 
1229 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
1230 {
1231         /* Never answer to SYNs send to broadcast or multicast */
1232         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
1233                 goto drop;
1234 
1235         return tcp_conn_request(&tcp_request_sock_ops,
1236                                 &tcp_request_sock_ipv4_ops, sk, skb);
1237 
1238 drop:
1239         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1240         return 0;
1241 }
1242 EXPORT_SYMBOL(tcp_v4_conn_request);
1243 
1244 
1245 /*
1246  * The three way handshake has completed - we got a valid synack -
1247  * now create the new socket.
1248  */
1249 struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
1250                                   struct request_sock *req,
1251                                   struct dst_entry *dst,
1252                                   struct request_sock *req_unhash,
1253                                   bool *own_req)
1254 {
1255         struct inet_request_sock *ireq;
1256         struct inet_sock *newinet;
1257         struct tcp_sock *newtp;
1258         struct sock *newsk;
1259 #ifdef CONFIG_TCP_MD5SIG
1260         struct tcp_md5sig_key *key;
1261 #endif
1262         struct ip_options_rcu *inet_opt;
1263 
1264         if (sk_acceptq_is_full(sk))
1265                 goto exit_overflow;
1266 
1267         newsk = tcp_create_openreq_child(sk, req, skb);
1268         if (!newsk)
1269                 goto exit_nonewsk;
1270 
1271         newsk->sk_gso_type = SKB_GSO_TCPV4;
1272         inet_sk_rx_dst_set(newsk, skb);
1273 
1274         newtp                 = tcp_sk(newsk);
1275         newinet               = inet_sk(newsk);
1276         ireq                  = inet_rsk(req);
1277         sk_daddr_set(newsk, ireq->ir_rmt_addr);
1278         sk_rcv_saddr_set(newsk, ireq->ir_loc_addr);
1279         newinet->inet_saddr           = ireq->ir_loc_addr;
1280         inet_opt              = ireq->opt;
1281         rcu_assign_pointer(newinet->inet_opt, inet_opt);
1282         ireq->opt             = NULL;
1283         newinet->mc_index     = inet_iif(skb);
1284         newinet->mc_ttl       = ip_hdr(skb)->ttl;
1285         newinet->rcv_tos      = ip_hdr(skb)->tos;
1286         inet_csk(newsk)->icsk_ext_hdr_len = 0;
1287         if (inet_opt)
1288                 inet_csk(newsk)->icsk_ext_hdr_len = inet_opt->opt.optlen;
1289         newinet->inet_id = newtp->write_seq ^ jiffies;
1290 
1291         if (!dst) {
1292                 dst = inet_csk_route_child_sock(sk, newsk, req);
1293                 if (!dst)
1294                         goto put_and_exit;
1295         } else {
1296                 /* syncookie case : see end of cookie_v4_check() */
1297         }
1298         sk_setup_caps(newsk, dst);
1299 
1300         tcp_ca_openreq_child(newsk, dst);
1301 
1302         tcp_sync_mss(newsk, dst_mtu(dst));
1303         newtp->advmss = dst_metric_advmss(dst);
1304         if (tcp_sk(sk)->rx_opt.user_mss &&
1305             tcp_sk(sk)->rx_opt.user_mss < newtp->advmss)
1306                 newtp->advmss = tcp_sk(sk)->rx_opt.user_mss;
1307 
1308         tcp_initialize_rcv_mss(newsk);
1309 
1310 #ifdef CONFIG_TCP_MD5SIG
1311         /* Copy over the MD5 key from the original socket */
1312         key = tcp_md5_do_lookup(sk, (union tcp_md5_addr *)&newinet->inet_daddr,
1313                                 AF_INET);
1314         if (key) {
1315                 /*
1316                  * We're using one, so create a matching key
1317                  * on the newsk structure. If we fail to get
1318                  * memory, then we end up not copying the key
1319                  * across. Shucks.
1320                  */
1321                 tcp_md5_do_add(newsk, (union tcp_md5_addr *)&newinet->inet_daddr,
1322                                AF_INET, key->key, key->keylen, GFP_ATOMIC);
1323                 sk_nocaps_add(newsk, NETIF_F_GSO_MASK);
1324         }
1325 #endif
1326 
1327         if (__inet_inherit_port(sk, newsk) < 0)
1328                 goto put_and_exit;
1329         *own_req = inet_ehash_nolisten(newsk, req_to_sk(req_unhash));
1330         if (*own_req)
1331                 tcp_move_syn(newtp, req);
1332 
1333         return newsk;
1334 
1335 exit_overflow:
1336         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
1337 exit_nonewsk:
1338         dst_release(dst);
1339 exit:
1340         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
1341         return NULL;
1342 put_and_exit:
1343         inet_csk_prepare_forced_close(newsk);
1344         tcp_done(newsk);
1345         goto exit;
1346 }
1347 EXPORT_SYMBOL(tcp_v4_syn_recv_sock);
1348 
1349 static struct sock *tcp_v4_cookie_check(struct sock *sk, struct sk_buff *skb)
1350 {
1351 #ifdef CONFIG_SYN_COOKIES
1352         const struct tcphdr *th = tcp_hdr(skb);
1353 
1354         if (!th->syn)
1355                 sk = cookie_v4_check(sk, skb);
1356 #endif
1357         return sk;
1358 }
1359 
1360 /* The socket must have its spinlock held when we get
1361  * here, unless it is a TCP_LISTEN socket.
1362  *
1363  * We have a potential double-lock case here, so even when
1364  * doing backlog processing we use the BH locking scheme.
1365  * This is because we cannot sleep with the original spinlock
1366  * held.
1367  */
1368 int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
1369 {
1370         struct sock *rsk;
1371 
1372         if (sk->sk_state == TCP_ESTABLISHED) { /* Fast path */
1373                 struct dst_entry *dst = sk->sk_rx_dst;
1374 
1375                 sock_rps_save_rxhash(sk, skb);
1376                 sk_mark_napi_id(sk, skb);
1377                 if (dst) {
1378                         if (inet_sk(sk)->rx_dst_ifindex != skb->skb_iif ||
1379                             !dst->ops->check(dst, 0)) {
1380                                 dst_release(dst);
1381                                 sk->sk_rx_dst = NULL;
1382                         }
1383                 }
1384                 tcp_rcv_established(sk, skb, tcp_hdr(skb), skb->len);
1385                 return 0;
1386         }
1387 
1388         if (tcp_checksum_complete(skb))
1389                 goto csum_err;
1390 
1391         if (sk->sk_state == TCP_LISTEN) {
1392                 struct sock *nsk = tcp_v4_cookie_check(sk, skb);
1393 
1394                 if (!nsk)
1395                         goto discard;
1396                 if (nsk != sk) {
1397                         sock_rps_save_rxhash(nsk, skb);
1398                         sk_mark_napi_id(nsk, skb);
1399                         if (tcp_child_process(sk, nsk, skb)) {
1400                                 rsk = nsk;
1401                                 goto reset;
1402                         }
1403                         return 0;
1404                 }
1405         } else
1406                 sock_rps_save_rxhash(sk, skb);
1407 
1408         if (tcp_rcv_state_process(sk, skb)) {
1409                 rsk = sk;
1410                 goto reset;
1411         }
1412         return 0;
1413 
1414 reset:
1415         tcp_v4_send_reset(rsk, skb);
1416 discard:
1417         kfree_skb(skb);
1418         /* Be careful here. If this function gets more complicated and
1419          * gcc suffers from register pressure on the x86, sk (in %ebx)
1420          * might be destroyed here. This current version compiles correctly,
1421          * but you have been warned.
1422          */
1423         return 0;
1424 
1425 csum_err:
1426         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_CSUMERRORS);
1427         TCP_INC_STATS_BH(sock_net(sk), TCP_MIB_INERRS);
1428         goto discard;
1429 }
1430 EXPORT_SYMBOL(tcp_v4_do_rcv);
1431 
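     /* Called from the IP receive path before the regular socket lookup: try
      * to find an established socket early so that its cached input route can
      * be attached to the skb, avoiding a separate routing decision in the
      * common case.
      */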
1432 void tcp_v4_early_demux(struct sk_buff *skb)
1433 {
1434         const struct iphdr *iph;
1435         const struct tcphdr *th;
1436         struct sock *sk;
1437 
1438         if (skb->pkt_type != PACKET_HOST)
1439                 return;
1440 
1441         if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct tcphdr)))
1442                 return;
1443 
1444         iph = ip_hdr(skb);
1445         th = tcp_hdr(skb);
1446 
1447         if (th->doff < sizeof(struct tcphdr) / 4)
1448                 return;
1449 
1450         sk = __inet_lookup_established(dev_net(skb->dev), &tcp_hashinfo,
1451                                        iph->saddr, th->source,
1452                                        iph->daddr, ntohs(th->dest),
1453                                        skb->skb_iif);
1454         if (sk) {
1455                 skb->sk = sk;
1456                 skb->destructor = sock_edemux;
1457                 if (sk_fullsock(sk)) {
1458                         struct dst_entry *dst = READ_ONCE(sk->sk_rx_dst);
1459 
1460                         if (dst)
1461                                 dst = dst_check(dst, 0);
1462                         if (dst &&
1463                             inet_sk(sk)->rx_dst_ifindex == skb->skb_iif)
1464                                 skb_dst_set_noref(skb, dst);
1465                 }
1466         }
1467 }
1468 
1469 /* Packet is added to VJ-style prequeue for processing in process
1470  * context, if a reader task is waiting. Apparently, this exciting
1471  * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1472  * failed somewhere. Latency? Burstiness? Well, at least now we will
1473  * see why it failed. 8)8)                                --ANK
1474  *
1475  */
1476 bool tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1477 {
1478         struct tcp_sock *tp = tcp_sk(sk);
1479 
1480         if (sysctl_tcp_low_latency || !tp->ucopy.task)
1481                 return false;
1482 
1483         if (skb->len <= tcp_hdrlen(skb) &&
1484             skb_queue_len(&tp->ucopy.prequeue) == 0)
1485                 return false;
1486 
1487         /* Before escaping the RCU protected region, we need to take care of
1488          * the skb dst. The prequeue is only enabled for established sockets.
1489          * For such sockets, we might need the skb dst only to set sk->sk_rx_dst.
1490          * Instead of doing a full sk_rx_dst validity check here, let's perform
1491          * an optimistic check.
1492          */
1493         if (likely(sk->sk_rx_dst))
1494                 skb_dst_drop(skb);
1495         else
1496                 skb_dst_force_safe(skb);
1497 
1498         __skb_queue_tail(&tp->ucopy.prequeue, skb);
1499         tp->ucopy.memory += skb->truesize;
1500         if (tp->ucopy.memory > sk->sk_rcvbuf) {
1501                 struct sk_buff *skb1;
1502 
1503                 BUG_ON(sock_owned_by_user(sk));
1504 
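                     /* The prequeue has outgrown the receive buffer: process the
                      * queued segments directly in softirq context, counting each
                      * as TCPPrequeueDropped (dropped from the prequeue path, not
                      * lost).
                      */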
1505                 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1506                         sk_backlog_rcv(sk, skb1);
1507                         NET_INC_STATS_BH(sock_net(sk),
1508                                          LINUX_MIB_TCPPREQUEUEDROPPED);
1509                 }
1510 
1511                 tp->ucopy.memory = 0;
1512         } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1513                 wake_up_interruptible_sync_poll(sk_sleep(sk),
1514                                            POLLIN | POLLRDNORM | POLLRDBAND);
1515                 if (!inet_csk_ack_scheduled(sk))
1516                         inet_csk_reset_xmit_timer(sk, ICSK_TIME_DACK,
1517                                                   (3 * tcp_rto_min(sk)) / 4,
1518                                                   TCP_RTO_MAX);
1519         }
1520         return true;
1521 }
1522 EXPORT_SYMBOL(tcp_prequeue);
1523 
1524 /*
1525  *      From tcp_input.c
1526  */
1527 
1528 int tcp_v4_rcv(struct sk_buff *skb)
1529 {
1530         const struct iphdr *iph;
1531         const struct tcphdr *th;
1532         struct sock *sk;
1533         int ret;
1534         struct net *net = dev_net(skb->dev);
1535 
1536         if (skb->pkt_type != PACKET_HOST)
1537                 goto discard_it;
1538 
1539         /* Count it even if it's bad */
1540         TCP_INC_STATS_BH(net, TCP_MIB_INSEGS);
1541 
1542         if (!pskb_may_pull(skb, sizeof(struct tcphdr)))
1543                 goto discard_it;
1544 
1545         th = tcp_hdr(skb);
1546 
1547         if (th->doff < sizeof(struct tcphdr) / 4)
1548                 goto bad_packet;
1549         if (!pskb_may_pull(skb, th->doff * 4))
1550                 goto discard_it;
1551 
1552         /* An explanation is required here, I think.
1553          * Packet length and doff are validated by header prediction,
1554          * provided the case of th->doff==0 is eliminated.
1555          * So, we defer the checks. */
1556 
1557         if (skb_checksum_init(skb, IPPROTO_TCP, inet_compute_pseudo))
1558                 goto csum_error;
1559 
1560         th = tcp_hdr(skb);
1561         iph = ip_hdr(skb);
1562         /* This is tricky: we move IPCB to its correct location inside TCP_SKB_CB();
1563          * barrier() makes sure the compiler won't play fool^Waliasing games.
1564          */
1565         memmove(&TCP_SKB_CB(skb)->header.h4, IPCB(skb),
1566                 sizeof(struct inet_skb_parm));
1567         barrier();
1568 
1569         TCP_SKB_CB(skb)->seq = ntohl(th->seq);
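             /* SYN and FIN each consume one sequence number, so end_seq is
              * seq + syn + fin + payload length (skb->len minus the header).
              */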
1570         TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
1571                                     skb->len - th->doff * 4);
1572         TCP_SKB_CB(skb)->ack_seq = ntohl(th->ack_seq);
1573         TCP_SKB_CB(skb)->tcp_flags = tcp_flag_byte(th);
1574         TCP_SKB_CB(skb)->tcp_tw_isn = 0;
1575         TCP_SKB_CB(skb)->ip_dsfield = ipv4_get_dsfield(iph);
1576         TCP_SKB_CB(skb)->sacked  = 0;
1577 
1578 lookup:
1579         sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
1580         if (!sk)
1581                 goto no_tcp_socket;
1582 
1583 process:
1584         if (sk->sk_state == TCP_TIME_WAIT)
1585                 goto do_time_wait;
1586 
1587         if (sk->sk_state == TCP_NEW_SYN_RECV) {
1588                 struct request_sock *req = inet_reqsk(sk);
1589                 struct sock *nsk = NULL;
1590 
1591                 sk = req->rsk_listener;
1592                 if (tcp_v4_inbound_md5_hash(sk, skb))
1593                         goto discard_and_relse;
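                     /* The listener may have closed or changed state since this
                      * request was created; in that case drop the request and
                      * repeat the lookup rather than processing the segment here.
                      */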
1594                 if (likely(sk->sk_state == TCP_LISTEN)) {
1595                         nsk = tcp_check_req(sk, skb, req, false);
1596                 } else {
1597                         inet_csk_reqsk_queue_drop_and_put(sk, req);
1598                         goto lookup;
1599                 }
1600                 if (!nsk) {
1601                         reqsk_put(req);
1602                         goto discard_it;
1603                 }
1604                 if (nsk == sk) {
1605                         sock_hold(sk);
1606                         reqsk_put(req);
1607                 } else if (tcp_child_process(sk, nsk, skb)) {
1608                         tcp_v4_send_reset(nsk, skb);
1609                         goto discard_it;
1610                 } else {
1611                         return 0;
1612                 }
1613         }
1614         if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
1615                 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
1616                 goto discard_and_relse;
1617         }
1618 
1619         if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb))
1620                 goto discard_and_relse;
1621 
1622         if (tcp_v4_inbound_md5_hash(sk, skb))
1623                 goto discard_and_relse;
1624 
1625         nf_reset(skb);
1626 
1627         if (sk_filter(sk, skb))
1628                 goto discard_and_relse;
1629 
1630         skb->dev = NULL;
1631 
1632         if (sk->sk_state == TCP_LISTEN) {
1633                 ret = tcp_v4_do_rcv(sk, skb);
1634                 goto put_and_return;
1635         }
1636 
1637         sk_incoming_cpu_update(sk);
1638 
1639         bh_lock_sock_nested(sk);
1640         tcp_sk(sk)->segs_in += max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1641         ret = 0;
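             /* If a process currently holds the socket lock, queue the segment
              * on the backlog (bounded by rcvbuf + sndbuf); otherwise try the
              * prequeue and fall back to immediate processing here in softirq
              * context.
              */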
1642         if (!sock_owned_by_user(sk)) {
1643                 if (!tcp_prequeue(sk, skb))
1644                         ret = tcp_v4_do_rcv(sk, skb);
1645         } else if (unlikely(sk_add_backlog(sk, skb,
1646                                            sk->sk_rcvbuf + sk->sk_sndbuf))) {
1647                 bh_unlock_sock(sk);
1648                 NET_INC_STATS_BH(net, LINUX_MIB_TCPBACKLOGDROP);
1649                 goto discard_and_relse;
1650         }
1651         bh_unlock_sock(sk);
1652 
1653 put_and_return:
1654         sock_put(sk);
1655 
1656         return ret;
1657 
1658 no_tcp_socket:
1659         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
1660                 goto discard_it;
1661 
1662         if (tcp_checksum_complete(skb)) {
1663 csum_error:
1664                 TCP_INC_STATS_BH(net, TCP_MIB_CSUMERRORS);
1665 bad_packet:
1666                 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
1667         } else {
1668                 tcp_v4_send_reset(NULL, skb);
1669         }
1670 
1671 discard_it:
1672         /* Discard frame. */
1673         kfree_skb(skb);
1674         return 0;
1675 
1676 discard_and_relse:
1677         sock_put(sk);
1678         goto discard_it;
1679 
1680 do_time_wait:
1681         if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb)) {
1682                 inet_twsk_put(inet_twsk(sk));
1683                 goto discard_it;
1684         }
1685 
1686         if (tcp_checksum_complete(skb)) {
1687                 inet_twsk_put(inet_twsk(sk));
1688                 goto csum_error;
1689         }
1690         switch (tcp_timewait_state_process(inet_twsk(sk), skb, th)) {
1691         case TCP_TW_SYN: {
1692                 struct sock *sk2 = inet_lookup_listener(dev_net(skb->dev),
1693                                                         &tcp_hashinfo,
1694                                                         iph->saddr, th->source,
1695                                                         iph->daddr, th->dest,
1696                                                         inet_iif(skb));
1697                 if (sk2) {
1698                         inet_twsk_deschedule_put(inet_twsk(sk));
1699                         sk = sk2;
1700                         goto process;
1701                 }
1702                 /* Fall through to ACK */
1703         }
1704         case TCP_TW_ACK:
1705                 tcp_v4_timewait_ack(sk, skb);
1706                 break;
1707         case TCP_TW_RST:
1708                 goto no_tcp_socket;
1709         case TCP_TW_SUCCESS:;
1710         }
1711         goto discard_it;
1712 }
1713 
1714 static struct timewait_sock_ops tcp_timewait_sock_ops = {
1715         .twsk_obj_size  = sizeof(struct tcp_timewait_sock),
1716         .twsk_unique    = tcp_twsk_unique,
1717         .twsk_destructor= tcp_twsk_destructor,
1718 };
1719 
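     /* Cache the input route of an incoming skb on the socket; the
      * established-state fast path in tcp_v4_do_rcv() reuses it until the
      * incoming interface or the route changes.
      */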
1720 void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
1721 {
1722         struct dst_entry *dst = skb_dst(skb);
1723 
1724         if (dst && dst_hold_safe(dst)) {
1725                 sk->sk_rx_dst = dst;
1726                 inet_sk(sk)->rx_dst_ifindex = skb->skb_iif;
1727         }
1728 }
1729 EXPORT_SYMBOL(inet_sk_rx_dst_set);
1730 
1731 const struct inet_connection_sock_af_ops ipv4_specific = {
1732         .queue_xmit        = ip_queue_xmit,
1733         .send_check        = tcp_v4_send_check,
1734         .rebuild_header    = inet_sk_rebuild_header,
1735         .sk_rx_dst_set     = inet_sk_rx_dst_set,
1736         .conn_request      = tcp_v4_conn_request,
1737         .syn_recv_sock     = tcp_v4_syn_recv_sock,
1738         .net_header_len    = sizeof(struct iphdr),
1739         .setsockopt        = ip_setsockopt,
1740         .getsockopt        = ip_getsockopt,
1741         .addr2sockaddr     = inet_csk_addr2sockaddr,
1742         .sockaddr_len      = sizeof(struct sockaddr_in),
1743         .bind_conflict     = inet_csk_bind_conflict,
1744 #ifdef CONFIG_COMPAT
1745         .compat_setsockopt = compat_ip_setsockopt,
1746         .compat_getsockopt = compat_ip_getsockopt,
1747 #endif
1748         .mtu_reduced       = tcp_v4_mtu_reduced,
1749 };
1750 EXPORT_SYMBOL(ipv4_specific);
1751 
1752 #ifdef CONFIG_TCP_MD5SIG
1753 static const struct tcp_sock_af_ops tcp_sock_ipv4_specific = {
1754         .md5_lookup             = tcp_v4_md5_lookup,
1755         .calc_md5_hash          = tcp_v4_md5_hash_skb,
1756         .md5_parse              = tcp_v4_parse_md5_keys,
1757 };
1758 #endif
1759 
1760 /* NOTE: A lot of things are set to zero explicitly by the call to
1761  *       sk_alloc(), so they need not be done here.
1762  */
1763 static int tcp_v4_init_sock(struct sock *sk)
1764 {
1765         struct inet_connection_sock *icsk = inet_csk(sk);
1766 
1767         tcp_init_sock(sk);
1768 
1769         icsk->icsk_af_ops = &ipv4_specific;
1770 
1771 #ifdef CONFIG_TCP_MD5SIG
1772         tcp_sk(sk)->af_specific = &tcp_sock_ipv4_specific;
1773 #endif
1774 
1775         return 0;
1776 }
1777 
1778 void tcp_v4_destroy_sock(struct sock *sk)
1779 {
1780         struct tcp_sock *tp = tcp_sk(sk);
1781 
1782         tcp_clear_xmit_timers(sk);
1783 
1784         tcp_cleanup_congestion_control(sk);
1785 
1786         /* Clean up the write buffer. */
1787         tcp_write_queue_purge(sk);
1788 
1789         /* Cleans up our, hopefully empty, out_of_order_queue. */
1790         __skb_queue_purge(&tp->out_of_order_queue);
1791 
1792 #ifdef CONFIG_TCP_MD5SIG
1793         /* Clean up the MD5 key list, if any */
1794         if (tp->md5sig_info) {
1795                 tcp_clear_md5_list(sk);
1796                 kfree_rcu(tp->md5sig_info, rcu);
1797                 tp->md5sig_info = NULL;
1798         }
1799 #endif
1800 
1801         /* Clean the prequeue; it really should be empty already. */
1802         __skb_queue_purge(&tp->ucopy.prequeue);
1803 
1804         /* Clean up a referenced TCP bind bucket. */
1805         if (inet_csk(sk)->icsk_bind_hash)
1806                 inet_put_port(sk);
1807 
1808         BUG_ON(tp->fastopen_rsk);
1809 
1810         /* If socket is aborted during connect operation */
1811         tcp_free_fastopen_req(tp);
1812         tcp_saved_syn_free(tp);
1813 
1814         sk_sockets_allocated_dec(sk);
1815         sock_release_memcg(sk);
1816 }
1817 EXPORT_SYMBOL(tcp_v4_destroy_sock);
1818 
1819 #ifdef CONFIG_PROC_FS
1820 /* Proc filesystem TCP sock list dumping. */
1821 
1822 /*
1823  * Get the next listener socket following cur.  If cur is NULL, get the first
1824  * socket starting from the bucket given in st->bucket; when st->bucket is zero the
1825  * very first socket in the hash table is returned.
1826  */
1827 static void *listening_get_next(struct seq_file *seq, void *cur)
1828 {
1829         struct inet_connection_sock *icsk;
1830         struct hlist_nulls_node *node;
1831         struct sock *sk = cur;
1832         struct inet_listen_hashbucket *ilb;
1833         struct tcp_iter_state *st = seq->private;
1834         struct net *net = seq_file_net(seq);
1835 
1836         if (!sk) {
1837                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1838                 spin_lock_bh(&ilb->lock);
1839                 sk = sk_nulls_head(&ilb->head);
1840                 st->offset = 0;
1841                 goto get_sk;
1842         }
1843         ilb = &tcp_hashinfo.listening_hash[st->bucket];
1844         ++st->num;
1845         ++st->offset;
1846 
1847         sk = sk_nulls_next(sk);
1848 get_sk:
1849         sk_nulls_for_each_from(sk, node) {
1850                 if (!net_eq(sock_net(sk), net))
1851                         continue;
1852                 if (sk->sk_family == st->family) {
1853                         cur = sk;
1854                         goto out;
1855                 }
1856                 icsk = inet_csk(sk);
1857         }
1858         spin_unlock_bh(&ilb->lock);
1859         st->offset = 0;
1860         if (++st->bucket < INET_LHTABLE_SIZE) {
1861                 ilb = &tcp_hashinfo.listening_hash[st->bucket];
1862                 spin_lock_bh(&ilb->lock);
1863                 sk = sk_nulls_head(&ilb->head);
1864                 goto get_sk;
1865         }
1866         cur = NULL;
1867 out:
1868         return cur;
1869 }
1870 
1871 static void *listening_get_idx(struct seq_file *seq, loff_t *pos)
1872 {
1873         struct tcp_iter_state *st = seq->private;
1874         void *rc;
1875 
1876         st->bucket = 0;
1877         st->offset = 0;
1878         rc = listening_get_next(seq, NULL);
1879 
1880         while (rc && *pos) {
1881                 rc = listening_get_next(seq, rc);
1882                 --*pos;
1883         }
1884         return rc;
1885 }
1886 
1887 static inline bool empty_bucket(const struct tcp_iter_state *st)
1888 {
1889         return hlist_nulls_empty(&tcp_hashinfo.ehash[st->bucket].chain);
1890 }
1891 
1892 /*
1893  * Get first established socket starting from bucket given in st->bucket.
1894  * If st->bucket is zero, the very first socket in the hash is returned.
1895  */
1896 static void *established_get_first(struct seq_file *seq)
1897 {
1898         struct tcp_iter_state *st = seq->private;
1899         struct net *net = seq_file_net(seq);
1900         void *rc = NULL;
1901 
1902         st->offset = 0;
1903         for (; st->bucket <= tcp_hashinfo.ehash_mask; ++st->bucket) {
1904                 struct sock *sk;
1905                 struct hlist_nulls_node *node;
1906                 spinlock_t *lock = inet_ehash_lockp(&tcp_hashinfo, st->bucket);
1907 
1908                 /* Lockless fast path for the common case of empty buckets */
1909                 if (empty_bucket(st))
1910                         continue;
1911 
1912                 spin_lock_bh(lock);
1913                 sk_nulls_for_each(sk, node, &tcp_hashinfo.ehash[st->bucket].chain) {
1914                         if (sk->sk_family != st->family ||
1915                             !net_eq(sock_net(sk), net)) {
1916                                 continue;
1917                         }
1918                         rc = sk;
1919                         goto out;
1920                 }
1921                 spin_unlock_bh(lock);
1922         }
1923 out:
1924         return rc;
1925 }
1926 
1927 static void *established_get_next(struct seq_file *seq, void *cur)
1928 {
1929         struct sock *sk = cur;
1930         struct hlist_nulls_node *node;
1931         struct tcp_iter_state *st = seq->private;
1932         struct net *net = seq_file_net(seq);
1933 
1934         ++st->num;
1935         ++st->offset;
1936 
1937         sk = sk_nulls_next(sk);
1938 
1939         sk_nulls_for_each_from(sk, node) {
1940                 if (sk->sk_family == st->family && net_eq(sock_net(sk), net))
1941                         return sk;
1942         }
1943 
1944         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
1945         ++st->bucket;
1946         return established_get_first(seq);
1947 }
1948 
1949 static void *established_get_idx(struct seq_file *seq, loff_t pos)
1950 {
1951         struct tcp_iter_state *st = seq->private;
1952         void *rc;
1953 
1954         st->bucket = 0;
1955         rc = established_get_first(seq);
1956 
1957         while (rc && pos) {
1958                 rc = established_get_next(seq, rc);
1959                 --pos;
1960         }
1961         return rc;
1962 }
1963 
1964 static void *tcp_get_idx(struct seq_file *seq, loff_t pos)
1965 {
1966         void *rc;
1967         struct tcp_iter_state *st = seq->private;
1968 
1969         st->state = TCP_SEQ_STATE_LISTENING;
1970         rc        = listening_get_idx(seq, &pos);
1971 
1972         if (!rc) {
1973                 st->state = TCP_SEQ_STATE_ESTABLISHED;
1974                 rc        = established_get_idx(seq, pos);
1975         }
1976 
1977         return rc;
1978 }
1979 
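     /* Resume iteration at the bucket/offset remembered in the iterator
      * state, so that a subsequent read() on the seq_file does not have to
      * walk the hash tables from the start to find its previous position.
      */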
1980 static void *tcp_seek_last_pos(struct seq_file *seq)
1981 {
1982         struct tcp_iter_state *st = seq->private;
1983         int offset = st->offset;
1984         int orig_num = st->num;
1985         void *rc = NULL;
1986 
1987         switch (st->state) {
1988         case TCP_SEQ_STATE_LISTENING:
1989                 if (st->bucket >= INET_LHTABLE_SIZE)
1990                         break;
1991                 st->state = TCP_SEQ_STATE_LISTENING;
1992                 rc = listening_get_next(seq, NULL);
1993                 while (offset-- && rc)
1994                         rc = listening_get_next(seq, rc);
1995                 if (rc)
1996                         break;
1997                 st->bucket = 0;
1998                 st->state = TCP_SEQ_STATE_ESTABLISHED;
1999                 /* Fallthrough */
2000         case TCP_SEQ_STATE_ESTABLISHED:
2001                 if (st->bucket > tcp_hashinfo.ehash_mask)
2002                         break;
2003                 rc = established_get_first(seq);
2004                 while (offset-- && rc)
2005                         rc = established_get_next(seq, rc);
2006         }
2007 
2008         st->num = orig_num;
2009 
2010         return rc;
2011 }
2012 
2013 static void *tcp_seq_start(struct seq_file *seq, loff_t *pos)
2014 {
2015         struct tcp_iter_state *st = seq->private;
2016         void *rc;
2017 
2018         if (*pos && *pos == st->last_pos) {
2019                 rc = tcp_seek_last_pos(seq);
2020                 if (rc)
2021                         goto out;
2022         }
2023 
2024         st->state = TCP_SEQ_STATE_LISTENING;
2025         st->num = 0;
2026         st->bucket = 0;
2027         st->offset = 0;
2028         rc = *pos ? tcp_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
2029 
2030 out:
2031         st->last_pos = *pos;
2032         return rc;
2033 }
2034 
2035 static void *tcp_seq_next(struct seq_file *seq, void *v, loff_t *pos)
2036 {
2037         struct tcp_iter_state *st = seq->private;
2038         void *rc = NULL;
2039 
2040         if (v == SEQ_START_TOKEN) {
2041                 rc = tcp_get_idx(seq, 0);
2042                 goto out;
2043         }
2044 
2045         switch (st->state) {
2046         case TCP_SEQ_STATE_LISTENING:
2047                 rc = listening_get_next(seq, v);
2048                 if (!rc) {
2049                         st->state = TCP_SEQ_STATE_ESTABLISHED;
2050                         st->bucket = 0;
2051                         st->offset = 0;
2052                         rc        = established_get_first(seq);
2053                 }
2054                 break;
2055         case TCP_SEQ_STATE_ESTABLISHED:
2056                 rc = established_get_next(seq, v);
2057                 break;
2058         }
2059 out:
2060         ++*pos;
2061         st->last_pos = *pos;
2062         return rc;
2063 }
2064 
2065 static void tcp_seq_stop(struct seq_file *seq, void *v)
2066 {
2067         struct tcp_iter_state *st = seq->private;
2068 
2069         switch (st->state) {
2070         case TCP_SEQ_STATE_LISTENING:
2071                 if (v != SEQ_START_TOKEN)
2072                         spin_unlock_bh(&tcp_hashinfo.listening_hash[st->bucket].lock);
2073                 break;
2074         case TCP_SEQ_STATE_ESTABLISHED:
2075                 if (v)
2076                         spin_unlock_bh(inet_ehash_lockp(&tcp_hashinfo, st->bucket));
2077                 break;
2078         }
2079 }
2080 
2081 int tcp_seq_open(struct inode *inode, struct file *file)
2082 {
2083         struct tcp_seq_afinfo *afinfo = PDE_DATA(inode);
2084         struct tcp_iter_state *s;
2085         int err;
2086 
2087         err = seq_open_net(inode, file, &afinfo->seq_ops,
2088                           sizeof(struct tcp_iter_state));
2089         if (err < 0)
2090                 return err;
2091 
2092         s = ((struct seq_file *)file->private_data)->private;
2093         s->family               = afinfo->family;
2094         s->last_pos             = 0;
2095         return 0;
2096 }
2097 EXPORT_SYMBOL(tcp_seq_open);
2098 
2099 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo)
2100 {
2101         int rc = 0;
2102         struct proc_dir_entry *p;
2103 
2104         afinfo->seq_ops.start           = tcp_seq_start;
2105         afinfo->seq_ops.next            = tcp_seq_next;
2106         afinfo->seq_ops.stop            = tcp_seq_stop;
2107 
2108         p = proc_create_data(afinfo->name, S_IRUGO, net->proc_net,
2109                              afinfo->seq_fops, afinfo);
2110         if (!p)
2111                 rc = -ENOMEM;
2112         return rc;
2113 }
2114 EXPORT_SYMBOL(tcp_proc_register);
2115 
2116 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo)
2117 {
2118         remove_proc_entry(afinfo->name, net->proc_net);
2119 }
2120 EXPORT_SYMBOL(tcp_proc_unregister);
2121 
2122 static void get_openreq4(const struct request_sock *req,
2123                          struct seq_file *f, int i)
2124 {
2125         const struct inet_request_sock *ireq = inet_rsk(req);
2126         long delta = req->rsk_timer.expires - jiffies;
2127 
2128         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2129                 " %02X %08X:%08X %02X:%08lX %08X %5u %8d %u %d %pK",
2130                 i,
2131                 ireq->ir_loc_addr,
2132                 ireq->ir_num,
2133                 ireq->ir_rmt_addr,
2134                 ntohs(ireq->ir_rmt_port),
2135                 TCP_SYN_RECV,
2136                 0, 0, /* could print option size, but that is af dependent. */
2137                 1,    /* timers active (only the expire timer) */
2138                 jiffies_delta_to_clock_t(delta),
2139                 req->num_timeout,
2140                 from_kuid_munged(seq_user_ns(f),
2141                                  sock_i_uid(req->rsk_listener)),
2142                 0,  /* non standard timer */
2143                 0, /* open_requests have no inode */
2144                 0,
2145                 req);
2146 }
2147 
2148 static void get_tcp4_sock(struct sock *sk, struct seq_file *f, int i)
2149 {
2150         int timer_active;
2151         unsigned long timer_expires;
2152         const struct tcp_sock *tp = tcp_sk(sk);
2153         const struct inet_connection_sock *icsk = inet_csk(sk);
2154         const struct inet_sock *inet = inet_sk(sk);
2155         const struct fastopen_queue *fastopenq = &icsk->icsk_accept_queue.fastopenq;
2156         __be32 dest = inet->inet_daddr;
2157         __be32 src = inet->inet_rcv_saddr;
2158         __u16 destp = ntohs(inet->inet_dport);
2159         __u16 srcp = ntohs(inet->inet_sport);
2160         int rx_queue;
2161         int state;
2162 
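             /* timer_active encodes which timer this /proc line reports:
              * 1 retransmit/loss-probe, 4 zero-window probe, 2 keepalive,
              * 0 none (timer_expires is then meaningless).
              */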
2163         if (icsk->icsk_pending == ICSK_TIME_RETRANS ||
2164             icsk->icsk_pending == ICSK_TIME_EARLY_RETRANS ||
2165             icsk->icsk_pending == ICSK_TIME_LOSS_PROBE) {
2166                 timer_active    = 1;
2167                 timer_expires   = icsk->icsk_timeout;
2168         } else if (icsk->icsk_pending == ICSK_TIME_PROBE0) {
2169                 timer_active    = 4;
2170                 timer_expires   = icsk->icsk_timeout;
2171         } else if (timer_pending(&sk->sk_timer)) {
2172                 timer_active    = 2;
2173                 timer_expires   = sk->sk_timer.expires;
2174         } else {
2175                 timer_active    = 0;
2176                 timer_expires = jiffies;
2177         }
2178 
2179         state = sk_state_load(sk);
2180         if (state == TCP_LISTEN)
2181                 rx_queue = sk->sk_ack_backlog;
2182         else
2183                 /* Because we don't lock the socket,
2184                  * we might find a transient negative value.
2185                  */
2186                 rx_queue = max_t(int, tp->rcv_nxt - tp->copied_seq, 0);
2187 
2188         seq_printf(f, "%4d: %08X:%04X %08X:%04X %02X %08X:%08X %02X:%08lX "
2189                         "%08X %5u %8d %lu %d %pK %lu %lu %u %u %d",
2190                 i, src, srcp, dest, destp, state,
2191                 tp->write_seq - tp->snd_una,
2192                 rx_queue,
2193                 timer_active,
2194                 jiffies_delta_to_clock_t(timer_expires - jiffies),
2195                 icsk->icsk_retransmits,
2196                 from_kuid_munged(seq_user_ns(f), sock_i_uid(sk)),
2197                 icsk->icsk_probes_out,
2198                 sock_i_ino(sk),
2199                 atomic_read(&sk->sk_refcnt), sk,
2200                 jiffies_to_clock_t(icsk->icsk_rto),
2201                 jiffies_to_clock_t(icsk->icsk_ack.ato),
2202                 (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong,
2203                 tp->snd_cwnd,
2204                 state == TCP_LISTEN ?
2205                     fastopenq->max_qlen :
2206                     (tcp_in_initial_slowstart(tp) ? -1 : tp->snd_ssthresh));
2207 }
2208 
2209 static void get_timewait4_sock(const struct inet_timewait_sock *tw,
2210                                struct seq_file *f, int i)
2211 {
2212         long delta = tw->tw_timer.expires - jiffies;
2213         __be32 dest, src;
2214         __u16 destp, srcp;
2215 
2216         dest  = tw->tw_daddr;
2217         src   = tw->tw_rcv_saddr;
2218         destp = ntohs(tw->tw_dport);
2219         srcp  = ntohs(tw->tw_sport);
2220 
2221         seq_printf(f, "%4d: %08X:%04X %08X:%04X"
2222                 " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %pK",
2223                 i, src, srcp, dest, destp, tw->tw_substate, 0, 0,
2224                 3, jiffies_delta_to_clock_t(delta), 0, 0, 0, 0,
2225                 atomic_read(&tw->tw_refcnt), tw);
2226 }
2227 
2228 #define TMPSZ 150
2229 
2230 static int tcp4_seq_show(struct seq_file *seq, void *v)
2231 {
2232         struct tcp_iter_state *st;
2233         struct sock *sk = v;
2234 
2235         seq_setwidth(seq, TMPSZ - 1);
2236         if (v == SEQ_START_TOKEN) {
2237                 seq_puts(seq, "  sl  local_address rem_address   st tx_queue "
2238                            "rx_queue tr tm->when retrnsmt   uid  timeout "
2239                            "inode");
2240                 goto out;
2241         }
2242         st = seq->private;
2243 
2244         if (sk->sk_state == TCP_TIME_WAIT)
2245                 get_timewait4_sock(v, seq, st->num);
2246         else if (sk->sk_state == TCP_NEW_SYN_RECV)
2247                 get_openreq4(v, seq, st->num);
2248         else
2249                 get_tcp4_sock(v, seq, st->num);
2250 out:
2251         seq_pad(seq, '\n');
2252         return 0;
2253 }
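
     /* For illustration: the addresses above are printed as raw __be32/__be16
      * values in hex, so on a little-endian host a line beginning with
      *   "   0: 0100007F:1F90 00000000:0000 0A ..."
      * describes a socket bound to 127.0.0.1:8080 (0x1F90) in state 0x0A
      * (TCP_LISTEN). The header written for SEQ_START_TOKEN names the leading
      * columns produced by get_openreq4(), get_tcp4_sock() and
      * get_timewait4_sock().
      */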
2254 
2255 static const struct file_operations tcp_afinfo_seq_fops = {
2256         .owner   = THIS_MODULE,
2257         .open    = tcp_seq_open,
2258         .read    = seq_read,
2259         .llseek  = seq_lseek,
2260         .release = seq_release_net
2261 };
2262 
2263 static struct tcp_seq_afinfo tcp4_seq_afinfo = {
2264         .name           = "tcp",
2265         .family         = AF_INET,
2266         .seq_fops       = &tcp_afinfo_seq_fops,
2267         .seq_ops        = {
2268                 .show           = tcp4_seq_show,
2269         },
2270 };
2271 
2272 static int __net_init tcp4_proc_init_net(struct net *net)
2273 {
2274         return tcp_proc_register(net, &tcp4_seq_afinfo);
2275 }
2276 
2277 static void __net_exit tcp4_proc_exit_net(struct net *net)
2278 {
2279         tcp_proc_unregister(net, &tcp4_seq_afinfo);
2280 }
2281 
2282 static struct pernet_operations tcp4_net_ops = {
2283         .init = tcp4_proc_init_net,
2284         .exit = tcp4_proc_exit_net,
2285 };
2286 
2287 int __init tcp4_proc_init(void)
2288 {
2289         return register_pernet_subsys(&tcp4_net_ops);
2290 }
2291 
2292 void tcp4_proc_exit(void)
2293 {
2294         unregister_pernet_subsys(&tcp4_net_ops);
2295 }
2296 #endif /* CONFIG_PROC_FS */
2297 
2298 struct proto tcp_prot = {
2299         .name                   = "TCP",
2300         .owner                  = THIS_MODULE,
2301         .close                  = tcp_close,
2302         .connect                = tcp_v4_connect,
2303         .disconnect             = tcp_disconnect,
2304         .accept                 = inet_csk_accept,
2305         .ioctl                  = tcp_ioctl,
2306         .init                   = tcp_v4_init_sock,
2307         .destroy                = tcp_v4_destroy_sock,
2308         .shutdown               = tcp_shutdown,
2309         .setsockopt             = tcp_setsockopt,
2310         .getsockopt             = tcp_getsockopt,
2311         .recvmsg                = tcp_recvmsg,
2312         .sendmsg                = tcp_sendmsg,
2313         .sendpage               = tcp_sendpage,
2314         .backlog_rcv            = tcp_v4_do_rcv,
2315         .release_cb             = tcp_release_cb,
2316         .hash                   = inet_hash,
2317         .unhash                 = inet_unhash,
2318         .get_port               = inet_csk_get_port,
2319         .enter_memory_pressure  = tcp_enter_memory_pressure,
2320         .stream_memory_free     = tcp_stream_memory_free,
2321         .sockets_allocated      = &tcp_sockets_allocated,
2322         .orphan_count           = &tcp_orphan_count,
2323         .memory_allocated       = &tcp_memory_allocated,
2324         .memory_pressure        = &tcp_memory_pressure,
2325         .sysctl_mem             = sysctl_tcp_mem,
2326         .sysctl_wmem            = sysctl_tcp_wmem,
2327         .sysctl_rmem            = sysctl_tcp_rmem,
2328         .max_header             = MAX_TCP_HEADER,
2329         .obj_size               = sizeof(struct tcp_sock),
2330         .slab_flags             = SLAB_DESTROY_BY_RCU,
2331         .twsk_prot              = &tcp_timewait_sock_ops,
2332         .rsk_prot               = &tcp_request_sock_ops,
2333         .h.hashinfo             = &tcp_hashinfo,
2334         .no_autobind            = true,
2335 #ifdef CONFIG_COMPAT
2336         .compat_setsockopt      = compat_tcp_setsockopt,
2337         .compat_getsockopt      = compat_tcp_getsockopt,
2338 #endif
2339 #ifdef CONFIG_MEMCG_KMEM
2340         .init_cgroup            = tcp_init_cgroup,
2341         .destroy_cgroup         = tcp_destroy_cgroup,
2342         .proto_cgroup           = tcp_proto_cgroup,
2343 #endif
2344 };
2345 EXPORT_SYMBOL(tcp_prot);
2346 
2347 static void __net_exit tcp_sk_exit(struct net *net)
2348 {
2349         int cpu;
2350 
2351         for_each_possible_cpu(cpu)
2352                 inet_ctl_sock_destroy(*per_cpu_ptr(net->ipv4.tcp_sk, cpu));
2353         free_percpu(net->ipv4.tcp_sk);
2354 }
2355 
2356 static int __net_init tcp_sk_init(struct net *net)
2357 {
2358         int res, cpu;
2359 
2360         net->ipv4.tcp_sk = alloc_percpu(struct sock *);
2361         if (!net->ipv4.tcp_sk)
2362                 return -ENOMEM;
2363 
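             /* One kernel control socket per possible CPU: these carry replies
              * such as resets and TIME_WAIT ACKs that must be sent without a
              * full socket (see tcp_v4_send_reset() and tcp_v4_send_ack()).
              */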
2364         for_each_possible_cpu(cpu) {
2365                 struct sock *sk;
2366 
2367                 res = inet_ctl_sock_create(&sk, PF_INET, SOCK_RAW,
2368                                            IPPROTO_TCP, net);
2369                 if (res)
2370                         goto fail;
2371                 *per_cpu_ptr(net->ipv4.tcp_sk, cpu) = sk;
2372         }
2373 
2374         net->ipv4.sysctl_tcp_ecn = 2;
2375         net->ipv4.sysctl_tcp_ecn_fallback = 1;
2376 
2377         net->ipv4.sysctl_tcp_base_mss = TCP_BASE_MSS;
2378         net->ipv4.sysctl_tcp_probe_threshold = TCP_PROBE_THRESHOLD;
2379         net->ipv4.sysctl_tcp_probe_interval = TCP_PROBE_INTERVAL;
2380 
2381         return 0;
2382 fail:
2383         tcp_sk_exit(net);
2384 
2385         return res;
2386 }
2387 
2388 static void __net_exit tcp_sk_exit_batch(struct list_head *net_exit_list)
2389 {
2390         inet_twsk_purge(&tcp_hashinfo, &tcp_death_row, AF_INET);
2391 }
2392 
2393 static struct pernet_operations __net_initdata tcp_sk_ops = {
2394        .init       = tcp_sk_init,
2395        .exit       = tcp_sk_exit,
2396        .exit_batch = tcp_sk_exit_batch,
2397 };
2398 
2399 void __init tcp_v4_init(void)
2400 {
2401         inet_hashinfo_init(&tcp_hashinfo);
2402         if (register_pernet_subsys(&tcp_sk_ops))
2403                 panic("Failed to create the TCP control socket.\n");
2404 }
2405 
