
Linux/include/net/tcp.h

/*
 * INET         An implementation of the TCP/IP protocol suite for the LINUX
 *              operating system.  INET is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              Definitions for the TCP module.
 *
 * Version:     @(#)tcp.h       1.0.5   05/23/93
 *
 * Authors:     Ross Biro
 *              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *              This program is free software; you can redistribute it and/or
 *              modify it under the terms of the GNU General Public License
 *              as published by the Free Software Foundation; either version
 *              2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H

#define FASTRETRANS_DEBUG 1

#include <linux/list.h>
#include <linux/tcp.h>
#include <linux/bug.h>
#include <linux/slab.h>
#include <linux/cache.h>
#include <linux/percpu.h>
#include <linux/skbuff.h>
#include <linux/cryptohash.h>
#include <linux/kref.h>
#include <linux/ktime.h>

#include <net/inet_connection_sock.h>
#include <net/inet_timewait_sock.h>
#include <net/inet_hashtables.h>
#include <net/checksum.h>
#include <net/request_sock.h>
#include <net/sock.h>
#include <net/snmp.h>
#include <net/ip.h>
#include <net/tcp_states.h>
#include <net/inet_ecn.h>
#include <net/dst.h>

#include <linux/seq_file.h>
#include <linux/memcontrol.h>

extern struct inet_hashinfo tcp_hashinfo;

extern struct percpu_counter tcp_orphan_count;
void tcp_time_wait(struct sock *sk, int state, int timeo);

#define MAX_TCP_HEADER  (128 + MAX_HEADER)
#define MAX_TCP_OPTION_SPACE 40

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW          32767U

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS             88U

/* The least MTU to use for probing */
#define TCP_BASE_MSS            1024

/* Probing interval; defaults to 10 minutes, per RFC 4821 */
#define TCP_PROBE_INTERVAL      600

/* Stop MTU probing once the search range narrows to this many bytes */
#define TCP_PROBE_THRESHOLD     8

/* After receiving this number of duplicate ACKs, fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH 3

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS       16U

/* urg_data states */
#define TCP_URG_VALID   0x0100
#define TCP_URG_NOTYET  0x0200
#define TCP_URG_READ    0x0400

#define TCP_RETR1       3       /*
                                 * This is how many retries it does before it
                                 * tries to figure out if the gateway is
                                 * down. Minimal RFC value is 3; it corresponds
                                 * to ~3sec-8min depending on RTO.
                                 */

#define TCP_RETR2       15      /*
                                 * This should take at least
                                 * 90 minutes to time out.
                                 * RFC1122 says that the limit is 100 sec.
                                 * 15 is ~13-30min depending on RTO.
                                 */

#define TCP_SYN_RETRIES  6      /* This is how many retries are done
                                 * when actively opening a connection.
                                 * RFC1122 says the minimum retry MUST
                                 * be at least 180secs.  Nevertheless
                                 * this value corresponds to 63secs of
                                 * retransmission with the current
                                 * initial RTO.
                                 */

#define TCP_SYNACK_RETRIES 5    /* This is how many retries are done
                                 * when passively opening a connection.
                                 * This corresponds to 31secs of
                                 * retransmission with the current
                                 * initial RTO.
                                 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
                                  * state, about 60 seconds     */
#define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
                                 /* BSD style FIN_WAIT2 deadlock breaker.
                                  * It used to be 3min, new value is 60sec,
                                  * to combine FIN-WAIT-2 timeout with
                                  * TIME-WAIT timer.
                                  */

#define TCP_DELACK_MAX  ((unsigned)(HZ/5))      /* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN  ((unsigned)(HZ/25))     /* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN     ((unsigned)(HZ/25))
#else
#define TCP_DELACK_MIN  4U
#define TCP_ATO_MIN     4U
#endif
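
/* For example, with HZ=1000 (a common configuration) the values above
 * work out to TCP_DELACK_MAX = 200 ms and
 * TCP_DELACK_MIN = TCP_ATO_MIN = 40 ms.
 */
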
#define TCP_RTO_MAX     ((unsigned)(120*HZ))
#define TCP_RTO_MIN     ((unsigned)(HZ/5))
#define TCP_TIMEOUT_INIT ((unsigned)(1*HZ))     /* RFC6298 2.1 initial RTO value        */
#define TCP_TIMEOUT_FALLBACK ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value, now
                                                 * used as a fallback RTO for the
                                                 * initial data transmission if no
                                                 * valid RTT sample has been acquired,
                                                 * most likely due to retrans in 3WHS.
                                                 */

#define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
                                                         * for local resources.
                                                         */

#define TCP_KEEPALIVE_TIME      (120*60*HZ)     /* two hours */
#define TCP_KEEPALIVE_PROBES    9               /* Max of 9 keepalive probes    */
#define TCP_KEEPALIVE_INTVL     (75*HZ)

#define MAX_TCP_KEEPIDLE        32767
#define MAX_TCP_KEEPINTVL       32767
#define MAX_TCP_KEEPCNT         127
#define MAX_TCP_SYNCNT          127

#define TCP_SYNQ_INTERVAL       (HZ/5)  /* Period of SYNACK timer */

#define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
#define TCP_PAWS_MSL    60              /* Per-host timestamps are invalidated
                                         * after this time. It should be equal
                                         * to (or greater than) TCP_TIMEWAIT_LEN
                                         * to provide reliability equal to that
                                         * provided by the TIME-WAIT state.
                                         */
#define TCP_PAWS_WINDOW 1               /* Replay window for per-host
                                         * timestamps. It must be less than the
                                         * minimal TIME-WAIT lifetime.
                                         */
/*
 *      TCP option
 */

#define TCPOPT_NOP              1       /* Padding */
#define TCPOPT_EOL              0       /* End of options */
#define TCPOPT_MSS              2       /* Segment size negotiating */
#define TCPOPT_WINDOW           3       /* Window scaling */
#define TCPOPT_SACK_PERM        4       /* SACK Permitted */
#define TCPOPT_SACK             5       /* SACK Block */
#define TCPOPT_TIMESTAMP        8       /* Better RTT estimations/PAWS */
#define TCPOPT_MD5SIG           19      /* MD5 Signature (RFC2385) */
#define TCPOPT_FASTOPEN         34      /* Fast open (RFC7413) */
#define TCPOPT_EXP              254     /* Experimental */
/* Magic number carried after the option kind and length, so that
 * experimental options can share the TCPOPT_EXP codepoint.
 * See draft-ietf-tcpm-experimental-options-00.txt
 */
#define TCPOPT_FASTOPEN_MAGIC   0xF989

/*
 *     TCP option lengths
 */

#define TCPOLEN_MSS            4
#define TCPOLEN_WINDOW         3
#define TCPOLEN_SACK_PERM      2
#define TCPOLEN_TIMESTAMP      10
#define TCPOLEN_MD5SIG         18
#define TCPOLEN_FASTOPEN_BASE  2
#define TCPOLEN_EXP_FASTOPEN_BASE  4

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED          12
#define TCPOLEN_WSCALE_ALIGNED          4
#define TCPOLEN_SACKPERM_ALIGNED        4
#define TCPOLEN_SACK_BASE               2
#define TCPOLEN_SACK_BASE_ALIGNED       4
#define TCPOLEN_SACK_PERBLOCK           8
#define TCPOLEN_MD5SIG_ALIGNED          20
#define TCPOLEN_MSS_ALIGNED             4
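
/* For instance, the 10-byte timestamp option is normally sent behind two
 * NOP pads, i.e. 01 01 08 0a <tsval> <tsecr> on the wire, which is how
 * TCPOLEN_TIMESTAMP (10) becomes TCPOLEN_TSTAMP_ALIGNED (12).
 */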

/* Flags in tp->nonagle */
#define TCP_NAGLE_OFF           1       /* Nagle's algo is disabled */
#define TCP_NAGLE_CORK          2       /* Socket is corked         */
#define TCP_NAGLE_PUSH          4       /* Cork is overridden for already queued data */

/* TCP thin-stream limits */
#define TCP_THIN_LINEAR_RETRIES 6       /* After 6 linear retries, do exp. backoff */

/* TCP initial congestion window as per rfc6928 */
#define TCP_INIT_CWND           10

/* Bit Flags for sysctl_tcp_fastopen */
#define TFO_CLIENT_ENABLE       1
#define TFO_SERVER_ENABLE       2
#define TFO_CLIENT_NO_COOKIE    4       /* Data in SYN w/o cookie option */

/* Accept SYN data w/o any cookie option */
#define TFO_SERVER_COOKIE_NOT_REQD      0x200

/* Force enable TFO on all listeners, i.e., not requiring the
 * TCP_FASTOPEN socket option.
 */
#define TFO_SERVER_WO_SOCKOPT1  0x400

extern struct inet_timewait_death_row tcp_death_row;

/* sysctl variables for tcp */
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fastopen;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_max_reordering;
extern int sysctl_tcp_dsack;
extern long sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;
extern int sysctl_tcp_tw_reuse;
extern int sysctl_tcp_frto;
extern int sysctl_tcp_low_latency;
extern int sysctl_tcp_nometrics_save;
extern int sysctl_tcp_moderate_rcvbuf;
extern int sysctl_tcp_tso_win_divisor;
extern int sysctl_tcp_workaround_signed_windows;
extern int sysctl_tcp_slow_start_after_idle;
extern int sysctl_tcp_thin_linear_timeouts;
extern int sysctl_tcp_thin_dupack;
extern int sysctl_tcp_early_retrans;
extern int sysctl_tcp_limit_output_bytes;
extern int sysctl_tcp_challenge_ack_limit;
extern int sysctl_tcp_min_tso_segs;
extern int sysctl_tcp_min_rtt_wlen;
extern int sysctl_tcp_autocorking;
extern int sysctl_tcp_invalid_ratelimit;
extern int sysctl_tcp_pacing_ss_ratio;
extern int sysctl_tcp_pacing_ca_ratio;

extern atomic_long_t tcp_memory_allocated;
extern struct percpu_counter tcp_sockets_allocated;
extern int tcp_memory_pressure;

/* optimized version of sk_under_memory_pressure() for TCP sockets */
static inline bool tcp_under_memory_pressure(const struct sock *sk)
{
        if (mem_cgroup_sockets_enabled && sk->sk_memcg &&
            mem_cgroup_under_socket_pressure(sk->sk_memcg))
                return true;

        return tcp_memory_pressure;
}
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

static inline bool before(__u32 seq1, __u32 seq2)
{
        return (__s32)(seq1-seq2) < 0;
}
#define after(seq2, seq1)       before(seq1, seq2)

/* is s2<=s1<=s3 ? */
static inline bool between(__u32 seq1, __u32 seq2, __u32 seq3)
{
        return seq3 - seq2 >= seq1 - seq2;
}
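
/* A minimal illustration (hypothetical helper, not used elsewhere in this
 * header): because the difference is evaluated as a signed 32-bit value,
 * these comparisons stay correct across sequence-number wraparound.
 */
static inline bool tcp_seq_wraparound_example(void)
{
        __u32 a = 0xfffffff0U;  /* shortly before the 32-bit wrap */
        __u32 b = 0x00000010U;  /* shortly after the wrap */

        /* a precedes b even though a > b numerically */
        return before(a, b) && after(b, a) && between(b, a, b);
}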

static inline bool tcp_out_of_memory(struct sock *sk)
{
        if (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
            sk_memory_allocated(sk) > sk_prot_mem_limits(sk, 2))
                return true;
        return false;
}

void sk_forced_mem_schedule(struct sock *sk, int size);

static inline bool tcp_too_many_orphans(struct sock *sk, int shift)
{
        struct percpu_counter *ocp = sk->sk_prot->orphan_count;
        int orphans = percpu_counter_read_positive(ocp);

        if (orphans << shift > sysctl_tcp_max_orphans) {
                orphans = percpu_counter_sum_positive(ocp);
                if (orphans << shift > sysctl_tcp_max_orphans)
                        return true;
        }
        return false;
}

bool tcp_check_oom(struct sock *sk, int shift);

extern struct proto tcp_prot;

#define TCP_INC_STATS(net, field)       SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define __TCP_INC_STATS(net, field)     __SNMP_INC_STATS((net)->mib.tcp_statistics, field)
#define TCP_DEC_STATS(net, field)       SNMP_DEC_STATS((net)->mib.tcp_statistics, field)
#define TCP_ADD_STATS(net, field, val)  SNMP_ADD_STATS((net)->mib.tcp_statistics, field, val)

void tcp_tasklet_init(void);

void tcp_v4_err(struct sk_buff *skb, u32);

void tcp_shutdown(struct sock *sk, int how);

void tcp_v4_early_demux(struct sk_buff *skb);
int tcp_v4_rcv(struct sk_buff *skb);

int tcp_v4_tw_remember_stamp(struct inet_timewait_sock *tw);
int tcp_sendmsg(struct sock *sk, struct msghdr *msg, size_t size);
int tcp_sendpage(struct sock *sk, struct page *page, int offset, size_t size,
                 int flags);
void tcp_release_cb(struct sock *sk);
void tcp_wfree(struct sk_buff *skb);
void tcp_write_timer_handler(struct sock *sk);
void tcp_delack_timer_handler(struct sock *sk);
int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg);
int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb);
void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                         const struct tcphdr *th, unsigned int len);
void tcp_rcv_space_adjust(struct sock *sk);
int tcp_twsk_unique(struct sock *sk, struct sock *sktw, void *twp);
void tcp_twsk_destructor(struct sock *sk);
ssize_t tcp_splice_read(struct socket *sk, loff_t *ppos,
                        struct pipe_inode_info *pipe, size_t len,
                        unsigned int flags);

static inline void tcp_dec_quickack_mode(struct sock *sk,
                                         const unsigned int pkts)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ack.quick) {
                if (pkts >= icsk->icsk_ack.quick) {
                        icsk->icsk_ack.quick = 0;
                        /* Leaving quickack mode we deflate ATO. */
                        icsk->icsk_ack.ato   = TCP_ATO_MIN;
                } else
                        icsk->icsk_ack.quick -= pkts;
        }
}

#define TCP_ECN_OK              1
#define TCP_ECN_QUEUE_CWR       2
#define TCP_ECN_DEMAND_CWR      4
#define TCP_ECN_SEEN            8

enum tcp_tw_status {
        TCP_TW_SUCCESS = 0,
        TCP_TW_RST = 1,
        TCP_TW_ACK = 2,
        TCP_TW_SYN = 3
};

enum tcp_tw_status tcp_timewait_state_process(struct inet_timewait_sock *tw,
                                              struct sk_buff *skb,
                                              const struct tcphdr *th);
struct sock *tcp_check_req(struct sock *sk, struct sk_buff *skb,
                           struct request_sock *req, bool fastopen);
int tcp_child_process(struct sock *parent, struct sock *child,
                      struct sk_buff *skb);
void tcp_enter_loss(struct sock *sk);
void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
                        bool paws_check, bool timestamps);
bool tcp_remember_stamp(struct sock *sk);
bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
unsigned int tcp_poll(struct file *file, struct socket *sock,
                      struct poll_table_struct *wait);
int tcp_getsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, int __user *optlen);
int tcp_setsockopt(struct sock *sk, int level, int optname,
                   char __user *optval, unsigned int optlen);
int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, int __user *optlen);
int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
                          char __user *optval, unsigned int optlen);
void tcp_set_keepalive(struct sock *sk, int val);
void tcp_syn_ack_timeout(const struct request_sock *req);
int tcp_recvmsg(struct sock *sk, struct msghdr *msg, size_t len, int nonblock,
                int flags, int *addr_len);
void tcp_parse_options(const struct sk_buff *skb,
                       struct tcp_options_received *opt_rx,
                       int estab, struct tcp_fastopen_cookie *foc);
const u8 *tcp_parse_md5sig_option(const struct tcphdr *th);

/*
 *      TCP v4 functions exported for the inet6 API
 */

void tcp_v4_send_check(struct sock *sk, struct sk_buff *skb);
void tcp_v4_mtu_reduced(struct sock *sk);
void tcp_req_err(struct sock *sk, u32 seq, bool abort);
int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb);
struct sock *tcp_create_openreq_child(const struct sock *sk,
                                      struct request_sock *req,
                                      struct sk_buff *skb);
void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst);
struct sock *tcp_v4_syn_recv_sock(const struct sock *sk, struct sk_buff *skb,
                                  struct request_sock *req,
                                  struct dst_entry *dst,
                                  struct request_sock *req_unhash,
                                  bool *own_req);
int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb);
int tcp_v4_connect(struct sock *sk, struct sockaddr *uaddr, int addr_len);
int tcp_connect(struct sock *sk);
enum tcp_synack_type {
        TCP_SYNACK_NORMAL,
        TCP_SYNACK_FASTOPEN,
        TCP_SYNACK_COOKIE,
};
struct sk_buff *tcp_make_synack(const struct sock *sk, struct dst_entry *dst,
                                struct request_sock *req,
                                struct tcp_fastopen_cookie *foc,
                                enum tcp_synack_type synack_type);
int tcp_disconnect(struct sock *sk, int flags);

void tcp_finish_connect(struct sock *sk, struct sk_buff *skb);
int tcp_send_rcvq(struct sock *sk, struct msghdr *msg, size_t size);
void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);

/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
                                 struct request_sock *req,
                                 struct dst_entry *dst);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
#ifdef CONFIG_SYN_COOKIES

/* Syncookies use a monotonic timer which increments every 60 seconds.
 * This counter is used both as a hash input and partially encoded into
 * the cookie value.  A cookie is only validated further if the delta
 * between the current counter value and the encoded one is less than this,
 * i.e. a sent cookie is valid for at most 2*60 seconds (or less if
 * the counter advances immediately after a cookie is generated).
 */
#define MAX_SYNCOOKIE_AGE       2
#define TCP_SYNCOOKIE_PERIOD    (60 * HZ)
#define TCP_SYNCOOKIE_VALID     (MAX_SYNCOOKIE_AGE * TCP_SYNCOOKIE_PERIOD)

/* syncookies: remember the time of the last synqueue overflow,
 * but do not dirty this field too often (once per second is enough).
 * It is racy as we do not hold a lock, but the race is very minor.
 */
static inline void tcp_synq_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;
        unsigned long now = jiffies;

        if (time_after(now, last_overflow + HZ))
                tcp_sk(sk)->rx_opt.ts_recent_stamp = now;
}

/* syncookies: no recent synqueue overflow on this listening socket? */
static inline bool tcp_synq_no_recent_overflow(const struct sock *sk)
{
        unsigned long last_overflow = tcp_sk(sk)->rx_opt.ts_recent_stamp;

        return time_after(jiffies, last_overflow + TCP_SYNCOOKIE_VALID);
}

static inline u32 tcp_cookie_time(void)
{
        u64 val = get_jiffies_64();

        do_div(val, TCP_SYNCOOKIE_PERIOD);
        return val;
}

u32 __cookie_v4_init_sequence(const struct iphdr *iph, const struct tcphdr *th,
                              u16 *mssp);
__u32 cookie_v4_init_sequence(const struct sk_buff *skb, __u16 *mss);
__u32 cookie_init_timestamp(struct request_sock *req);
bool cookie_timestamp_decode(struct tcp_options_received *opt);
bool cookie_ecn_ok(const struct tcp_options_received *opt,
                   const struct net *net, const struct dst_entry *dst);

/* From net/ipv6/syncookies.c */
int __cookie_v6_check(const struct ipv6hdr *iph, const struct tcphdr *th,
                      u32 cookie);
struct sock *cookie_v6_check(struct sock *sk, struct sk_buff *skb);

u32 __cookie_v6_init_sequence(const struct ipv6hdr *iph,
                              const struct tcphdr *th, u16 *mssp);
__u32 cookie_v6_init_sequence(const struct sk_buff *skb, __u16 *mss);
#endif
/* tcp_output.c */

u32 tcp_tso_autosize(const struct sock *sk, unsigned int mss_now,
                     int min_tso_segs);
void __tcp_push_pending_frames(struct sock *sk, unsigned int cur_mss,
                               int nonagle);
bool tcp_may_send_now(struct sock *sk);
int __tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
int tcp_retransmit_skb(struct sock *sk, struct sk_buff *skb, int segs);
void tcp_retransmit_timer(struct sock *sk);
void tcp_xmit_retransmit_queue(struct sock *);
void tcp_simple_retransmit(struct sock *);
int tcp_trim_head(struct sock *, struct sk_buff *, u32);
int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int, gfp_t);

void tcp_send_probe0(struct sock *);
void tcp_send_partial(struct sock *);
int tcp_write_wakeup(struct sock *, int mib);
void tcp_send_fin(struct sock *sk);
void tcp_send_active_reset(struct sock *sk, gfp_t priority);
int tcp_send_synack(struct sock *);
void tcp_push_one(struct sock *, unsigned int mss_now);
void tcp_send_ack(struct sock *sk);
void tcp_send_delayed_ack(struct sock *sk);
void tcp_send_loss_probe(struct sock *sk);
bool tcp_schedule_loss_probe(struct sock *sk);
void tcp_skb_collapse_tstamp(struct sk_buff *skb,
                             const struct sk_buff *next_skb);

/* tcp_input.c */
void tcp_resume_early_retransmit(struct sock *sk);
void tcp_rearm_rto(struct sock *sk);
void tcp_synack_rtt_meas(struct sock *sk, struct request_sock *req);
void tcp_reset(struct sock *sk);
void tcp_skb_mark_lost_uncond_verify(struct tcp_sock *tp, struct sk_buff *skb);
void tcp_fin(struct sock *sk);

/* tcp_timer.c */
void tcp_init_xmit_timers(struct sock *);
static inline void tcp_clear_xmit_timers(struct sock *sk)
{
        inet_csk_clear_xmit_timers(sk);
}

unsigned int tcp_sync_mss(struct sock *sk, u32 pmtu);
unsigned int tcp_current_mss(struct sock *sk);

/* Bound MSS / TSO packet size to half of the window */
static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
{
        int cutoff;

        /* When the peer uses tiny windows, there is no use in packetizing
         * to sub-MSS pieces for the sake of SWS or making sure there
         * are enough packets in the pipe for fast recovery.
         *
         * On the other hand, for extremely large MSS devices, handling
         * smaller than MSS windows in this way does make sense.
         */
        if (tp->max_window > TCP_MSS_DEFAULT)
                cutoff = (tp->max_window >> 1);
        else
                cutoff = tp->max_window;

        if (cutoff && pktsize > cutoff)
                return max_t(int, cutoff, 68U - tp->tcp_header_len);
        else
                return pktsize;
}

/* tcp.c */
void tcp_get_info(struct sock *, struct tcp_info *);

/* Read 'sendfile()'-style from a TCP socket */
int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
                  sk_read_actor_t recv_actor);

void tcp_initialize_rcv_mss(struct sock *sk);

int tcp_mtu_to_mss(struct sock *sk, int pmtu);
int tcp_mss_to_mtu(struct sock *sk, int mss);
void tcp_mtup_init(struct sock *sk);
void tcp_init_buffer_space(struct sock *sk);

static inline void tcp_bound_rto(const struct sock *sk)
{
        if (inet_csk(sk)->icsk_rto > TCP_RTO_MAX)
                inet_csk(sk)->icsk_rto = TCP_RTO_MAX;
}

static inline u32 __tcp_set_rto(const struct tcp_sock *tp)
{
        return usecs_to_jiffies((tp->srtt_us >> 3) + tp->rttvar_us);
}
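
/* tp->srtt_us holds the smoothed RTT left-shifted by 3 (i.e. 8*SRTT), and
 * tp->rttvar_us already carries the 4x deviation scaling, so the sum above
 * follows the RFC 6298 rule RTO = SRTT + 4*RTTVAR.
 */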

static inline void __tcp_fast_path_on(struct tcp_sock *tp, u32 snd_wnd)
{
        tp->pred_flags = htonl((tp->tcp_header_len << 26) |
                               ntohl(TCP_FLAG_ACK) |
                               snd_wnd);
}

static inline void tcp_fast_path_on(struct tcp_sock *tp)
{
        __tcp_fast_path_on(tp, tp->snd_wnd >> tp->rx_opt.snd_wscale);
}

static inline void tcp_fast_path_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);

        if (RB_EMPTY_ROOT(&tp->out_of_order_queue) &&
            tp->rcv_wnd &&
            atomic_read(&sk->sk_rmem_alloc) < sk->sk_rcvbuf &&
            !tp->urg_data)
                tcp_fast_path_on(tp);
}

/* Compute the actual rto_min value */
static inline u32 tcp_rto_min(struct sock *sk)
{
        const struct dst_entry *dst = __sk_dst_get(sk);
        u32 rto_min = TCP_RTO_MIN;

        if (dst && dst_metric_locked(dst, RTAX_RTO_MIN))
                rto_min = dst_metric_rtt(dst, RTAX_RTO_MIN);
        return rto_min;
}

static inline u32 tcp_rto_min_us(struct sock *sk)
{
        return jiffies_to_usecs(tcp_rto_min(sk));
}

static inline bool tcp_ca_dst_locked(const struct dst_entry *dst)
{
        return dst_metric_locked(dst, RTAX_CC_ALGO);
}

/* Minimum RTT in usec. ~0 means not available. */
static inline u32 tcp_min_rtt(const struct tcp_sock *tp)
{
        return minmax_get(&tp->rtt_min);
}

/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static inline u32 tcp_receive_window(const struct tcp_sock *tp)
{
        s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

        if (win < 0)
                win = 0;
        return (u32) win;
}
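
/* Worked example: rcv_wup = 1000, rcv_wnd = 500 and rcv_nxt = 1200 leave
 * 1000 + 500 - 1200 = 300 bytes of advertised window; had the peer pushed
 * past the offered window, the result would clamp to 0.
 */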

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
u32 __tcp_select_window(struct sock *sk);

void tcp_send_window_probe(struct sock *sk);

/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We decided
 * to use only the low 32-bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp          ((__u32)(jiffies))

static inline u32 tcp_skb_timestamp(const struct sk_buff *skb)
{
        return skb->skb_mstamp.stamp_jiffies;
}

#define tcp_flag_byte(th) (((u_int8_t *)th)[13])

#define TCPHDR_FIN 0x01
#define TCPHDR_SYN 0x02
#define TCPHDR_RST 0x04
#define TCPHDR_PSH 0x08
#define TCPHDR_ACK 0x10
#define TCPHDR_URG 0x20
#define TCPHDR_ECE 0x40
#define TCPHDR_CWR 0x80

#define TCPHDR_SYN_ECN  (TCPHDR_SYN | TCPHDR_ECE | TCPHDR_CWR)
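
/* Sketch (hypothetical helper) of combining these masks with
 * tcp_flag_byte(): an RFC 3168 ECN-setup SYN carries SYN, ECE and CWR
 * and is not an ACK.
 */
static inline bool tcp_hdr_is_ecn_setup_syn(const struct tcphdr *th)
{
        return (tcp_flag_byte(th) & TCPHDR_SYN_ECN) == TCPHDR_SYN_ECN &&
               !(tcp_flag_byte(th) & TCPHDR_ACK);
}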

/* This is what the send packet queuing engine uses to pass
 * TCP per-packet control information to the transmission code.
 * We also store the host-order sequence numbers in here.
 * This is 44 bytes if IPV6 is enabled.
 * If this grows please adjust skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
        __u32           seq;            /* Starting sequence number     */
        __u32           end_seq;        /* SEQ + FIN + SYN + datalen    */
        union {
                /* Note: tcp_tw_isn is used in input path only
                 *       (isn chosen by tcp_timewait_state_process())
                 *
                 *       tcp_gso_segs/size are used in write queue only,
                 *       cf tcp_skb_pcount()/tcp_skb_mss()
                 */
                __u32           tcp_tw_isn;
                struct {
                        u16     tcp_gso_segs;
                        u16     tcp_gso_size;
                };
        };
        __u8            tcp_flags;      /* TCP header flags. (tcp[13])  */

        __u8            sacked;         /* State flags for SACK/FACK.   */
#define TCPCB_SACKED_ACKED      0x01    /* SKB ACK'd by a SACK block    */
#define TCPCB_SACKED_RETRANS    0x02    /* SKB retransmitted            */
#define TCPCB_LOST              0x04    /* SKB is lost                  */
#define TCPCB_TAGBITS           0x07    /* All tag bits                 */
#define TCPCB_REPAIRED          0x10    /* SKB repaired (no skb_mstamp) */
#define TCPCB_EVER_RETRANS      0x80    /* Ever retransmitted frame     */
#define TCPCB_RETRANS           (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS| \
                                TCPCB_REPAIRED)

        __u8            ip_dsfield;     /* IPv4 tos or IPv6 dsfield     */
        __u8            txstamp_ack:1,  /* Record TX timestamp for ack? */
                        eor:1,          /* Is skb MSG_EOR marked? */
                        unused:6;
        __u32           ack_seq;        /* Sequence number ACK'd        */
        union {
                struct {
                        /* There is space for up to 24 bytes */
                        __u32 in_flight:30,/* Bytes in flight at transmit */
                              is_app_limited:1, /* cwnd not fully used? */
                              unused:1;
                        /* pkts S/ACKed so far upon tx of skb, incl retrans: */
                        __u32 delivered;
                        /* start of send pipeline phase */
                        struct skb_mstamp first_tx_mstamp;
                        /* when we reached the "delivered" count */
                        struct skb_mstamp delivered_mstamp;
                } tx;   /* only used for outgoing skbs */
                union {
                        struct inet_skb_parm    h4;
#if IS_ENABLED(CONFIG_IPV6)
                        struct inet6_skb_parm   h6;
#endif
                } header;       /* For incoming skbs */
        };
};

#define TCP_SKB_CB(__skb)       ((struct tcp_skb_cb *)&((__skb)->cb[0]))
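
/* Usage sketch (hypothetical helper): an skb owned by TCP is only ever
 * inspected through this cast, e.g. to get the sequence range it covers
 * (end_seq counts SYN and FIN as one sequence position each).
 */
static inline u32 tcp_skb_seq_span(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq;
}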

#if IS_ENABLED(CONFIG_IPV6)
/* This is the variant of inet6_iif() that must be used by TCP,
 * as TCP moves IP6CB into a different location in skb->cb[]
 */
static inline int tcp_v6_iif(const struct sk_buff *skb)
{
        bool l3_slave = ipv6_l3mdev_skb(TCP_SKB_CB(skb)->header.h6.flags);

        return l3_slave ? skb->skb_iif : TCP_SKB_CB(skb)->header.h6.iif;
}
#endif

/* TCP_SKB_CB reference means this can not be used from early demux */
static inline bool inet_exact_dif_match(struct net *net, struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NET_L3_MASTER_DEV)
        if (!net->ipv4.sysctl_tcp_l3mdev_accept &&
            skb && ipv4_l3mdev_skb(TCP_SKB_CB(skb)->header.h4.flags))
                return true;
#endif
        return false;
}

/* Due to TSO, an SKB can be composed of multiple actual
 * packets.  To keep these tracked properly, we use this.
 */
static inline int tcp_skb_pcount(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_segs;
}

static inline void tcp_skb_pcount_set(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs = segs;
}

static inline void tcp_skb_pcount_add(struct sk_buff *skb, int segs)
{
        TCP_SKB_CB(skb)->tcp_gso_segs += segs;
}

/* This is valid iff skb is in write queue and tcp_skb_pcount() > 1. */
static inline int tcp_skb_mss(const struct sk_buff *skb)
{
        return TCP_SKB_CB(skb)->tcp_gso_size;
}

static inline bool tcp_skb_can_collapse_to(const struct sk_buff *skb)
{
        return likely(!TCP_SKB_CB(skb)->eor);
}

/* Events passed to congestion control interface */
enum tcp_ca_event {
        CA_EVENT_TX_START,      /* first transmit when no packets in flight */
        CA_EVENT_CWND_RESTART,  /* congestion window restart */
        CA_EVENT_COMPLETE_CWR,  /* end of congestion recovery */
        CA_EVENT_LOSS,          /* loss timeout */
        CA_EVENT_ECN_NO_CE,     /* ECT set, but not CE marked */
        CA_EVENT_ECN_IS_CE,     /* received CE marked IP packet */
        CA_EVENT_DELAYED_ACK,   /* Delayed ack is sent */
        CA_EVENT_NON_DELAYED_ACK,
};

/* Information about inbound ACK, passed to cong_ops->in_ack_event() */
enum tcp_ca_ack_event_flags {
        CA_ACK_SLOWPATH         = (1 << 0),     /* In slow path processing */
        CA_ACK_WIN_UPDATE       = (1 << 1),     /* ACK updated window */
        CA_ACK_ECE              = (1 << 2),     /* ECE bit is set on ack */
};

/*
 * Interface for adding new TCP congestion control handlers
 */
#define TCP_CA_NAME_MAX 16
#define TCP_CA_MAX      128
#define TCP_CA_BUF_MAX  (TCP_CA_NAME_MAX*TCP_CA_MAX)

#define TCP_CA_UNSPEC   0

/* Algorithm can be set on socket without CAP_NET_ADMIN privileges */
#define TCP_CONG_NON_RESTRICTED 0x1
/* Requires ECN/ECT set on all packets */
#define TCP_CONG_NEEDS_ECN      0x2

union tcp_cc_info;

struct ack_sample {
        u32 pkts_acked;
        s32 rtt_us;
        u32 in_flight;
};

/* A rate sample measures the number of (original/retransmitted) data
 * packets delivered "delivered" over an interval of time "interval_us".
 * The tcp_rate.c code fills in the rate sample, and congestion
 * control modules that define a cong_control function to run at the end
 * of ACK processing can optionally choose to consult this sample when
 * setting cwnd and pacing rate.
 * A sample is invalid if "delivered" or "interval_us" is negative.
 */
struct rate_sample {
        struct skb_mstamp prior_mstamp; /* starting timestamp for interval */
        u32  prior_delivered;   /* tp->delivered at "prior_mstamp" */
        s32  delivered;         /* number of packets delivered over interval */
        long interval_us;       /* time for tp->delivered to incr "delivered" */
        long rtt_us;            /* RTT of last (S)ACKed packet (or -1) */
        int  losses;            /* number of packets marked lost upon ACK */
        u32  acked_sacked;      /* number of packets newly (S)ACKed upon ACK */
        u32  prior_in_flight;   /* in flight before this ACK */
        bool is_app_limited;    /* is sample from packet with bubble in pipe? */
        bool is_retrans;        /* is sample from retransmission? */
};

struct tcp_congestion_ops {
        struct list_head        list;
        u32 key;
        u32 flags;

        /* initialize private data (optional) */
        void (*init)(struct sock *sk);
        /* cleanup private data  (optional) */
        void (*release)(struct sock *sk);

        /* return slow start threshold (required) */
        u32 (*ssthresh)(struct sock *sk);
        /* do new cwnd calculation (required) */
        void (*cong_avoid)(struct sock *sk, u32 ack, u32 acked);
        /* call before changing ca_state (optional) */
        void (*set_state)(struct sock *sk, u8 new_state);
        /* call when cwnd event occurs (optional) */
        void (*cwnd_event)(struct sock *sk, enum tcp_ca_event ev);
        /* call when ack arrives (optional) */
        void (*in_ack_event)(struct sock *sk, u32 flags);
        /* new value of cwnd after loss (optional) */
        u32  (*undo_cwnd)(struct sock *sk);
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, const struct ack_sample *sample);
        /* suggest number of segments for each skb to transmit (optional) */
        u32 (*tso_segs_goal)(struct sock *sk);
        /* returns the multiplier used in tcp_sndbuf_expand (optional) */
        u32 (*sndbuf_expand)(struct sock *sk);
        /* call when packets are delivered to update cwnd and pacing rate,
         * after all the ca_state processing. (optional)
         */
        void (*cong_control)(struct sock *sk, const struct rate_sample *rs);
        /* get info for inet_diag (optional) */
        size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
                           union tcp_cc_info *info);

        char            name[TCP_CA_NAME_MAX];
        struct module   *owner;
};

int tcp_register_congestion_control(struct tcp_congestion_ops *type);
void tcp_unregister_congestion_control(struct tcp_congestion_ops *type);

void tcp_assign_congestion_control(struct sock *sk);
void tcp_init_congestion_control(struct sock *sk);
void tcp_cleanup_congestion_control(struct sock *sk);
int tcp_set_default_congestion_control(const char *name);
void tcp_get_default_congestion_control(char *name);
void tcp_get_available_congestion_control(char *buf, size_t len);
void tcp_get_allowed_congestion_control(char *buf, size_t len);
int tcp_set_allowed_congestion_control(char *allowed);
int tcp_set_congestion_control(struct sock *sk, const char *name);
u32 tcp_slow_start(struct tcp_sock *tp, u32 acked);
void tcp_cong_avoid_ai(struct tcp_sock *tp, u32 w, u32 acked);

u32 tcp_reno_ssthresh(struct sock *sk);
void tcp_reno_cong_avoid(struct sock *sk, u32 ack, u32 acked);
extern struct tcp_congestion_ops tcp_reno;
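
/* A minimal sketch of a module built on this interface; "tcp_example"
 * and the init function are hypothetical names. Only .ssthresh and
 * .cong_avoid are required, here delegated to the Reno helpers declared
 * above; a real module would pair the init function with module_init()
 * and tcp_unregister_congestion_control() on exit.
 */
static struct tcp_congestion_ops tcp_example __read_mostly = {
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .name           = "example",
        .owner          = THIS_MODULE,
};

static int __init tcp_example_register(void)
{
        return tcp_register_congestion_control(&tcp_example);
}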

struct tcp_congestion_ops *tcp_ca_find_key(u32 key);
u32 tcp_ca_get_key_by_name(const char *name, bool *ecn_ca);
#ifdef CONFIG_INET
char *tcp_ca_get_name_by_key(u32 key, char *buffer);
#else
static inline char *tcp_ca_get_name_by_key(u32 key, char *buffer)
{
        return NULL;
}
#endif

static inline bool tcp_ca_needs_ecn(const struct sock *sk)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        return icsk->icsk_ca_ops->flags & TCP_CONG_NEEDS_ECN;
}

static inline void tcp_set_ca_state(struct sock *sk, const u8 ca_state)
{
        struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->set_state)
                icsk->icsk_ca_ops->set_state(sk, ca_state);
        icsk->icsk_ca_state = ca_state;
}

static inline void tcp_ca_event(struct sock *sk, const enum tcp_ca_event event)
{
        const struct inet_connection_sock *icsk = inet_csk(sk);

        if (icsk->icsk_ca_ops->cwnd_event)
                icsk->icsk_ca_ops->cwnd_event(sk, event);
}

/* From tcp_rate.c */
void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
                            struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
                  struct skb_mstamp *now, struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);

/* These functions determine how the current flow behaves in respect of SACK
 * handling. SACK is negotiated with the peer, and therefore it can vary
 * between different flows.
 *
 * tcp_is_sack - SACK enabled
 * tcp_is_reno - No SACK
 * tcp_is_fack - FACK enabled, implies SACK enabled
 */
static inline int tcp_is_sack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok;
}

static inline bool tcp_is_reno(const struct tcp_sock *tp)
{
        return !tcp_is_sack(tp);
}

static inline bool tcp_is_fack(const struct tcp_sock *tp)
{
        return tp->rx_opt.sack_ok & TCP_FACK_ENABLED;
}

static inline void tcp_enable_fack(struct tcp_sock *tp)
{
        tp->rx_opt.sack_ok |= TCP_FACK_ENABLED;
}

/* TCP early-retransmit (ER) is similar to but more conservative than
 * the thin-dupack feature.  Enable ER only if thin-dupack is disabled.
 */
static inline void tcp_enable_early_retrans(struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        tp->do_early_retrans = sysctl_tcp_early_retrans &&
                sysctl_tcp_early_retrans < 4 && !sysctl_tcp_thin_dupack &&
                net->ipv4.sysctl_tcp_reordering == 3;
}

static inline void tcp_disable_early_retrans(struct tcp_sock *tp)
{
        tp->do_early_retrans = 0;
}

static inline unsigned int tcp_left_out(const struct tcp_sock *tp)
{
        return tp->sacked_out + tp->lost_out;
}

/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control; use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *      "Packets sent once on transmission queue" MINUS
 *      "Packets left network, but not honestly ACKed yet" PLUS
 *      "Packets fast retransmitted"
 */
static inline unsigned int tcp_packets_in_flight(const struct tcp_sock *tp)
{
        return tp->packets_out - tcp_left_out(tp) + tp->retrans_out;
}
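
/* Worked example: packets_out = 10, sacked_out = 3, lost_out = 2 and
 * retrans_out = 1 give 10 - (3 + 2) + 1 = 6 packets still in flight.
 */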

#define TCP_INFINITE_SSTHRESH   0x7fffffff

static inline bool tcp_in_slow_start(const struct tcp_sock *tp)
{
        return tp->snd_cwnd < tp->snd_ssthresh;
}

static inline bool tcp_in_initial_slowstart(const struct tcp_sock *tp)
{
        return tp->snd_ssthresh >= TCP_INFINITE_SSTHRESH;
}

static inline bool tcp_in_cwnd_reduction(const struct sock *sk)
{
        return (TCPF_CA_CWR | TCPF_CA_Recovery) &
               (1 << inet_csk(sk)->icsk_ca_state);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is cwnd reduction phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        if (tcp_in_cwnd_reduction(sk))
                return tp->snd_ssthresh;
        else
                return max(tp->snd_ssthresh,
                           ((tp->snd_cwnd >> 1) +
                            (tp->snd_cwnd >> 2)));
}
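
/* The max() above can only raise ssthresh to (cwnd >> 1) + (cwnd >> 2),
 * i.e. three quarters of the current cwnd.
 */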

/* Use a define here intentionally so the WARN_ON location is shown at the caller */
#define tcp_verify_left_out(tp) WARN_ON(tcp_left_out(tp) > tp->packets_out)

void tcp_enter_cwr(struct sock *sk);
__u32 tcp_init_cwnd(const struct tcp_sock *tp, const struct dst_entry *dst);

/* The maximum number of MSS of available cwnd for which TSO defers
 * sending if not using sysctl_tcp_tso_win_divisor.
 */
static inline __u32 tcp_max_tso_deferred_mss(const struct tcp_sock *tp)
{
        return 3;
}

/* Returns end sequence number of the receiver's advertised window */
static inline u32 tcp_wnd_end(const struct tcp_sock *tp)
{
        return tp->snd_una + tp->snd_wnd;
}

/* We follow the spirit of RFC2861 to validate cwnd but implement a more
 * flexible approach. The RFC suggests cwnd should not be raised unless
 * it was fully used previously. And that's exactly what we do in
 * congestion avoidance mode. But in slow start we allow cwnd to grow
 * as long as the application has used half the cwnd.
 * Example:
 *    cwnd is 10 (IW10), but the application sends 9 frames.
 *    We allow cwnd to reach 18 when all frames are ACKed.
 * This check is safe because it's as aggressive as slow start, which already
 * risks 100% overshoot. The advantage is that we discourage applications from
 * sending more filler packets or data to artificially blow up the cwnd
 * usage, and allow an application-limited process to probe bw more aggressively.
 */
static inline bool tcp_is_cwnd_limited(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);

        /* If in slow start, ensure cwnd grows to twice what was ACKed. */
        if (tcp_in_slow_start(tp))
                return tp->snd_cwnd < 2 * tp->max_packets_out;

        return tp->is_cwnd_limited;
}

/* Something is really bad: we could not queue an additional packet,
 * because the qdisc is full or the receiver sent a zero window.
 * We do not want to add fuel to the fire, or abort too early,
 * so make sure the timer we arm now is at least 200ms in the future,
 * regardless of the current icsk_rto value (as it could be ~2ms).
 */
static inline unsigned long tcp_probe0_base(const struct sock *sk)
{
        return max_t(unsigned long, inet_csk(sk)->icsk_rto, TCP_RTO_MIN);
}

/* Variant of inet_csk_rto_backoff() used for zero window probes */
static inline unsigned long tcp_probe0_when(const struct sock *sk,
                                            unsigned long max_when)
{
        u64 when = (u64)tcp_probe0_base(sk) << inet_csk(sk)->icsk_backoff;

        return (unsigned long)min_t(u64, when, max_when);
}

static inline void tcp_check_probe_timer(struct sock *sk)
{
        if (!tcp_sk(sk)->packets_out && !inet_csk(sk)->icsk_pending)
                inet_csk_reset_xmit_timer(sk, ICSK_TIME_PROBE0,
                                          tcp_probe0_base(sk), TCP_RTO_MAX);
}

static inline void tcp_init_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

static inline void tcp_update_wl(struct tcp_sock *tp, u32 seq)
{
        tp->snd_wl1 = seq;
}

/*
 * Calculate/check TCP checksum
 */
static inline __sum16 tcp_v4_check(int len, __be32 saddr,
                                   __be32 daddr, __wsum base)
{
        return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_TCP, base);
}

static inline __sum16 __tcp_checksum_complete(struct sk_buff *skb)
{
        return __skb_checksum_complete(skb);
}

static inline bool tcp_checksum_complete(struct sk_buff *skb)
{
        return !skb_csum_unnecessary(skb) &&
                __tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static inline void tcp_prequeue_init(struct tcp_sock *tp)
{
        tp->ucopy.task = NULL;
        tp->ucopy.len = 0;
        tp->ucopy.memory = 0;
        skb_queue_head_init(&tp->ucopy.prequeue);
}

bool tcp_prequeue(struct sock *sk, struct sk_buff *skb);
bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb);
int tcp_filter(struct sock *sk, struct sk_buff *skb);

#undef STATE_TRACE

#ifdef STATE_TRACE
static const char *statename[] = {
        "Unused", "Established", "Syn Sent", "Syn Recv",
        "Fin Wait 1", "Fin Wait 2", "Time Wait", "Close",
        "Close Wait", "Last ACK", "Listen", "Closing"
};
#endif
void tcp_set_state(struct sock *sk, int state);

void tcp_done(struct sock *sk);

int tcp_abort(struct sock *sk, int err);

static inline void tcp_sack_reset(struct tcp_options_received *rx_opt)
{
        rx_opt->dsack = 0;
        rx_opt->num_sacks = 0;
}

u32 tcp_default_init_rwnd(u32 mss);
void tcp_cwnd_restart(struct sock *sk, s32 delta);

static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
        struct tcp_sock *tp = tcp_sk(sk);
        s32 delta;

        if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
                return;
        delta = tcp_time_stamp - tp->lsndtime;
        if (delta > inet_csk(sk)->icsk_rto)
                tcp_cwnd_restart(sk, delta);
}

/* Determine a window scaling and initial window to offer. */
void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
                               __u32 *window_clamp, int wscale_ok,
                               __u8 *rcv_wscale, __u32 init_rcv_wnd);

static inline int tcp_win_from_space(int space)
{
        return sysctl_tcp_adv_win_scale <= 0 ?
                (space >> (-sysctl_tcp_adv_win_scale)) :
                space - (space >> sysctl_tcp_adv_win_scale);
}
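
/* Example: sysctl_tcp_adv_win_scale = 1 advertises half of the buffer
 * (space - space/2), keeping the other half as overhead slack; 2
 * advertises three quarters (space - space/4); a negative value such as
 * -2 advertises only space >> 2.
 */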

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf -
                                  atomic_read(&sk->sk_rmem_alloc));
}

static inline int tcp_full_space(const struct sock *sk)
{
        return tcp_win_from_space(sk->sk_rcvbuf);
}

extern void tcp_openreq_init_rwin(struct request_sock *req,
                                  const struct sock *sk_listener,
                                  const struct dst_entry *dst);

void tcp_enter_memory_pressure(struct sock *sk);

static inline int keepalive_intvl_when(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_intvl ? : net->ipv4.sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_time ? : net->ipv4.sysctl_tcp_keepalive_time;
}

static inline int keepalive_probes(const struct tcp_sock *tp)
{
        struct net *net = sock_net((struct sock *)tp);

        return tp->keepalive_probes ? : net->ipv4.sysctl_tcp_keepalive_probes;
}

static inline u32 keepalive_time_elapsed(const struct tcp_sock *tp)
{
        const struct inet_connection_sock *icsk = &tp->inet_conn;

        return min_t(u32, tcp_time_stamp - icsk->icsk_ack.lrcvtime,
                          tcp_time_stamp - tp->rcv_tstamp);
}

static inline int tcp_fin_time(const struct sock *sk)
{
        int fin_timeout = tcp_sk(sk)->linger2 ? : sock_net(sk)->ipv4.sysctl_tcp_fin_timeout;
        const int rto = inet_csk(sk)->icsk_rto;

        if (fin_timeout < (rto << 2) - (rto >> 1))
                fin_timeout = (rto << 2) - (rto >> 1);

        return fin_timeout;
}
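
/* The clamp above keeps fin_timeout at or above
 * (rto << 2) - (rto >> 1) = 3.5 * RTO.
 */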

static inline bool tcp_paws_check(const struct tcp_options_received *rx_opt,
                                  int paws_win)
{
        if ((s32)(rx_opt->ts_recent - rx_opt->rcv_tsval) <= paws_win)
                return true;
        if (unlikely(get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_24DAYS))
                return true;
        /*
         * Some OSes send SYN and SYNACK messages with tsval=0 tsecr=0,
         * while subsequent TCP segments carry valid values. Ignore a 0
         * value, or else a 'negative' tsval might prevent us from
         * accepting their packets.
         */
        if (!rx_opt->ts_recent)
                return true;
        return false;
}

static inline bool tcp_paws_reject(const struct tcp_options_received *rx_opt,
                                   int rst)
{
        if (tcp_paws_check(rx_opt, 0))
                return false;

        /* RST segments are not recommended to carry timestamps,
           and, if they do, it is recommended to ignore PAWS because
           "their cleanup function should take precedence over timestamps."
           Certainly, this is a mistake. It is necessary to understand the
           reasons for this constraint before relaxing it: if the peer
           reboots, its clock may go out-of-sync and half-open connections
           will not be reset. Actually, the problem would not exist if all
           the implementations followed the draft about maintaining the
           clock across reboots. Linux-2.2 DOES NOT!

           However, we can relax the time bounds for RST segments to MSL.
         */
        if (rst && get_seconds() >= rx_opt->ts_recent_stamp + TCP_PAWS_MSL)
                return false;
        return true;
}

bool tcp_oow_rate_limited(struct net *net, const struct sk_buff *skb,
                          int mib_idx, u32 *last_oow_ack_time);

static inline void tcp_mib_init(struct net *net)
{
        /* See RFC 2012 */
        TCP_ADD_STATS(net, TCP_MIB_RTOALGORITHM, 1);
        TCP_ADD_STATS(net, TCP_MIB_RTOMIN, TCP_RTO_MIN*1000/HZ);
        TCP_ADD_STATS(net, TCP_MIB_RTOMAX, TCP_RTO_MAX*1000/HZ);
        TCP_ADD_STATS(net, TCP_MIB_MAXCONN, -1);
}
1382 
1383 /* from STCP */
1384 static inline void tcp_clear_retrans_hints_partial(struct tcp_sock *tp)
1385 {
1386         tp->lost_skb_hint = NULL;
1387 }
1388 
1389 static inline void tcp_clear_all_retrans_hints(struct tcp_sock *tp)
1390 {
1391         tcp_clear_retrans_hints_partial(tp);
1392         tp->retransmit_skb_hint = NULL;
1393 }
1394 
1395 union tcp_md5_addr {
1396         struct in_addr  a4;
1397 #if IS_ENABLED(CONFIG_IPV6)
1398         struct in6_addr a6;
1399 #endif
1400 };
1401 
1402 /* - key database */
1403 struct tcp_md5sig_key {
1404         struct hlist_node       node;
1405         u8                      keylen;
1406         u8                      family; /* AF_INET or AF_INET6 */
1407         union tcp_md5_addr      addr;
1408         u8                      key[TCP_MD5SIG_MAXKEYLEN];
1409         struct rcu_head         rcu;
1410 };
1411 
1412 /* - sock block */
1413 struct tcp_md5sig_info {
1414         struct hlist_head       head;
1415         struct rcu_head         rcu;
1416 };
1417 
1418 /* - pseudo header */
1419 struct tcp4_pseudohdr {
1420         __be32          saddr;
1421         __be32          daddr;
1422         __u8            pad;
1423         __u8            protocol;
1424         __be16          len;
1425 };
1426 
1427 struct tcp6_pseudohdr {
1428         struct in6_addr saddr;
1429         struct in6_addr daddr;
1430         __be32          len;
1431         __be32          protocol;       /* including padding */
1432 };
1433 
1434 union tcp_md5sum_block {
1435         struct tcp4_pseudohdr ip4;
1436 #if IS_ENABLED(CONFIG_IPV6)
1437         struct tcp6_pseudohdr ip6;
1438 #endif
1439 };
1440 
1441 /* - pool: digest algorithm, hash description and scratch buffer */
1442 struct tcp_md5sig_pool {
1443         struct ahash_request    *md5_req;
1444         void                    *scratch;
1445 };
1446 
1447 /* - functions */
1448 int tcp_v4_md5_hash_skb(char *md5_hash, const struct tcp_md5sig_key *key,
1449                         const struct sock *sk, const struct sk_buff *skb);
1450 int tcp_md5_do_add(struct sock *sk, const union tcp_md5_addr *addr,
1451                    int family, const u8 *newkey, u8 newkeylen, gfp_t gfp);
1452 int tcp_md5_do_del(struct sock *sk, const union tcp_md5_addr *addr,
1453                    int family);
1454 struct tcp_md5sig_key *tcp_v4_md5_lookup(const struct sock *sk,
1455                                          const struct sock *addr_sk);
1456 
1457 #ifdef CONFIG_TCP_MD5SIG
1458 struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1459                                          const union tcp_md5_addr *addr,
1460                                          int family);
1461 #define tcp_twsk_md5_key(twsk)  ((twsk)->tw_md5_key)
1462 #else
1463 static inline struct tcp_md5sig_key *tcp_md5_do_lookup(const struct sock *sk,
1464                                          const union tcp_md5_addr *addr,
1465                                          int family)
1466 {
1467         return NULL;
1468 }
1469 #define tcp_twsk_md5_key(twsk)  NULL
1470 #endif
1471 
1472 bool tcp_alloc_md5sig_pool(void);
1473 
1474 struct tcp_md5sig_pool *tcp_get_md5sig_pool(void);
1475 static inline void tcp_put_md5sig_pool(void)
1476 {
1477         local_bh_enable();
1478 }
1479 
1480 int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *, const struct sk_buff *,
1481                           unsigned int header_len);
1482 int tcp_md5_hash_key(struct tcp_md5sig_pool *hp,
1483                      const struct tcp_md5sig_key *key);
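From userspace these hooks are driven by the TCP_MD5SIG socket option. A hedged sketch of installing an RFC 2385 key before connect() or listen() (the peer address and key are placeholders; on older libcs struct tcp_md5sig may need <linux/tcp.h> instead):

#include <string.h>
#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int set_tcp_md5_key(int fd, const struct sockaddr_in *peer,
			   const char *key, size_t keylen)
{
	struct tcp_md5sig md5;

	if (keylen > TCP_MD5SIG_MAXKEYLEN)
		return -1;
	memset(&md5, 0, sizeof(md5));
	memcpy(&md5.tcpm_addr, peer, sizeof(*peer));
	md5.tcpm_keylen = keylen;
	memcpy(md5.tcpm_key, key, keylen);

	/* reaches tcp_md5_do_add() via the socket-option path */
	return setsockopt(fd, IPPROTO_TCP, TCP_MD5SIG, &md5, sizeof(md5));
}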
1484 
1485 /* From tcp_fastopen.c */
1486 void tcp_fastopen_cache_get(struct sock *sk, u16 *mss,
1487                             struct tcp_fastopen_cookie *cookie, int *syn_loss,
1488                             unsigned long *last_syn_loss);
1489 void tcp_fastopen_cache_set(struct sock *sk, u16 mss,
1490                             struct tcp_fastopen_cookie *cookie, bool syn_lost,
1491                             u16 try_exp);
1492 struct tcp_fastopen_request {
1493         /* Fast Open cookie. Size 0 means a cookie request */
1494         struct tcp_fastopen_cookie      cookie;
1495         struct msghdr                   *data;  /* data in MSG_FASTOPEN */
1496         size_t                          size;
1497         int                             copied; /* queued in tcp_connect() */
1498 };
1499 void tcp_free_fastopen_req(struct tcp_sock *tp);
1500 
1501 extern struct tcp_fastopen_context __rcu *tcp_fastopen_ctx;
1502 int tcp_fastopen_reset_cipher(void *key, unsigned int len);
1503 void tcp_fastopen_add_skb(struct sock *sk, struct sk_buff *skb);
1504 struct sock *tcp_try_fastopen(struct sock *sk, struct sk_buff *skb,
1505                               struct request_sock *req,
1506                               struct tcp_fastopen_cookie *foc,
1507                               struct dst_entry *dst);
1508 void tcp_fastopen_init_key_once(bool publish);
1509 #define TCP_FASTOPEN_KEY_LENGTH 16
1510 
1511 /* Fastopen key context */
1512 struct tcp_fastopen_context {
1513         struct crypto_cipher    *tfm;
1514         __u8                    key[TCP_FASTOPEN_KEY_LENGTH];
1515         struct rcu_head         rcu;
1516 };
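On the client side this machinery is exercised through MSG_FASTOPEN, which folds the connect into sendto(): the first call requests a cookie over a normal handshake, and once one is cached the payload rides in the SYN itself. A minimal sketch (address and buffer are placeholders):

#include <netinet/in.h>
#include <sys/socket.h>		/* MSG_FASTOPEN; may need _GNU_SOURCE */

static ssize_t tfo_send(int fd, const struct sockaddr_in *peer,
			const void *buf, size_t len)
{
	return sendto(fd, buf, len, MSG_FASTOPEN,
		      (const struct sockaddr *)peer, sizeof(*peer));
}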
1517 
1518 /* write queue abstraction */
1519 static inline void tcp_write_queue_purge(struct sock *sk)
1520 {
1521         struct sk_buff *skb;
1522 
1523         while ((skb = __skb_dequeue(&sk->sk_write_queue)) != NULL)
1524                 sk_wmem_free_skb(sk, skb);
1525         sk_mem_reclaim(sk);
1526         tcp_clear_all_retrans_hints(tcp_sk(sk));
1527 }
1528 
1529 static inline struct sk_buff *tcp_write_queue_head(const struct sock *sk)
1530 {
1531         return skb_peek(&sk->sk_write_queue);
1532 }
1533 
1534 static inline struct sk_buff *tcp_write_queue_tail(const struct sock *sk)
1535 {
1536         return skb_peek_tail(&sk->sk_write_queue);
1537 }
1538 
1539 static inline struct sk_buff *tcp_write_queue_next(const struct sock *sk,
1540                                                    const struct sk_buff *skb)
1541 {
1542         return skb_queue_next(&sk->sk_write_queue, skb);
1543 }
1544 
1545 static inline struct sk_buff *tcp_write_queue_prev(const struct sock *sk,
1546                                                    const struct sk_buff *skb)
1547 {
1548         return skb_queue_prev(&sk->sk_write_queue, skb);
1549 }
1550 
1551 #define tcp_for_write_queue(skb, sk)                                    \
1552         skb_queue_walk(&(sk)->sk_write_queue, skb)
1553 
1554 #define tcp_for_write_queue_from(skb, sk)                               \
1555         skb_queue_walk_from(&(sk)->sk_write_queue, skb)
1556 
1557 #define tcp_for_write_queue_from_safe(skb, tmp, sk)                     \
1558         skb_queue_walk_from_safe(&(sk)->sk_write_queue, skb, tmp)
1559 
1560 static inline struct sk_buff *tcp_send_head(const struct sock *sk)
1561 {
1562         return sk->sk_send_head;
1563 }
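A sketch of the pattern these walkers enable, mirroring uses in net/ipv4/tcp_output.c (the function below is illustrative and not part of this header):

static inline unsigned int tcp_unsent_bytes(struct sock *sk)
{
	struct sk_buff *skb = tcp_send_head(sk);
	unsigned int bytes = 0;

	if (!skb)
		return 0;
	/* walk from the first unsent skb to the end of the write queue */
	tcp_for_write_queue_from(skb, sk)
		bytes += skb->len;
	return bytes;
}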
1564 
1565 static inline bool tcp_skb_is_last(const struct sock *sk,
1566                                    const struct sk_buff *skb)
1567 {
1568         return skb_queue_is_last(&sk->sk_write_queue, skb);
1569 }
1570 
1571 static inline void tcp_advance_send_head(struct sock *sk, const struct sk_buff *skb)
1572 {
1573         if (tcp_skb_is_last(sk, skb))
1574                 sk->sk_send_head = NULL;
1575         else
1576                 sk->sk_send_head = tcp_write_queue_next(sk, skb);
1577 }
1578 
1579 static inline void tcp_check_send_head(struct sock *sk, struct sk_buff *skb_unlinked)
1580 {
1581         if (sk->sk_send_head == skb_unlinked)
1582                 sk->sk_send_head = NULL;
1583         if (tcp_sk(sk)->highest_sack == skb_unlinked)
1584                 tcp_sk(sk)->highest_sack = NULL;
1585 }
1586 
1587 static inline void tcp_init_send_head(struct sock *sk)
1588 {
1589         sk->sk_send_head = NULL;
1590 }
1591 
1592 static inline void __tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1593 {
1594         __skb_queue_tail(&sk->sk_write_queue, skb);
1595 }
1596 
1597 static inline void tcp_add_write_queue_tail(struct sock *sk, struct sk_buff *skb)
1598 {
1599         __tcp_add_write_queue_tail(sk, skb);
1600 
1601         /* Queue it, remembering where we must start sending. */
1602         if (sk->sk_send_head == NULL) {
1603                 sk->sk_send_head = skb;
1604 
1605                 if (tcp_sk(sk)->highest_sack == NULL)
1606                         tcp_sk(sk)->highest_sack = skb;
1607         }
1608 }
1609 
1610 static inline void __tcp_add_write_queue_head(struct sock *sk, struct sk_buff *skb)
1611 {
1612         __skb_queue_head(&sk->sk_write_queue, skb);
1613 }
1614 
1615 /* Insert buff after skb on the write queue of sk.  */
1616 static inline void tcp_insert_write_queue_after(struct sk_buff *skb,
1617                                                 struct sk_buff *buff,
1618                                                 struct sock *sk)
1619 {
1620         __skb_queue_after(&sk->sk_write_queue, skb, buff);
1621 }
1622 
1623 /* Insert new before skb on the write queue of sk.  */
1624 static inline void tcp_insert_write_queue_before(struct sk_buff *new,
1625                                                   struct sk_buff *skb,
1626                                                   struct sock *sk)
1627 {
1628         __skb_queue_before(&sk->sk_write_queue, skb, new);
1629 
1630         if (sk->sk_send_head == skb)
1631                 sk->sk_send_head = new;
1632 }
1633 
1634 static inline void tcp_unlink_write_queue(struct sk_buff *skb, struct sock *sk)
1635 {
1636         __skb_unlink(skb, &sk->sk_write_queue);
1637 }
1638 
1639 static inline bool tcp_write_queue_empty(struct sock *sk)
1640 {
1641         return skb_queue_empty(&sk->sk_write_queue);
1642 }
1643 
1644 static inline void tcp_push_pending_frames(struct sock *sk)
1645 {
1646         if (tcp_send_head(sk)) {
1647                 struct tcp_sock *tp = tcp_sk(sk);
1648 
1649                 __tcp_push_pending_frames(sk, tcp_current_mss(sk), tp->nonagle);
1650         }
1651 }
1652 
1653 /* Start sequence of the skb just after the highest skb with the SACKed
1654  * bit set; valid only if sacked_out > 0 or if the caller has otherwise
1655  * ensured validity.
1656  */
1657 static inline u32 tcp_highest_sack_seq(struct tcp_sock *tp)
1658 {
1659         if (!tp->sacked_out)
1660                 return tp->snd_una;
1661 
1662         if (tp->highest_sack == NULL)
1663                 return tp->snd_nxt;
1664 
1665         return TCP_SKB_CB(tp->highest_sack)->seq;
1666 }
1667 
1668 static inline void tcp_advance_highest_sack(struct sock *sk, struct sk_buff *skb)
1669 {
1670         tcp_sk(sk)->highest_sack = tcp_skb_is_last(sk, skb) ? NULL :
1671                                                 tcp_write_queue_next(sk, skb);
1672 }
1673 
1674 static inline struct sk_buff *tcp_highest_sack(struct sock *sk)
1675 {
1676         return tcp_sk(sk)->highest_sack;
1677 }
1678 
1679 static inline void tcp_highest_sack_reset(struct sock *sk)
1680 {
1681         tcp_sk(sk)->highest_sack = tcp_write_queue_head(sk);
1682 }
1683 
1684 /* Called when old skb is about to be deleted (to be combined with new skb) */
1685 static inline void tcp_highest_sack_combine(struct sock *sk,
1686                                             struct sk_buff *old,
1687                                             struct sk_buff *new)
1688 {
1689         if (tcp_sk(sk)->sacked_out && (old == tcp_sk(sk)->highest_sack))
1690                 tcp_sk(sk)->highest_sack = new;
1691 }
1692 
1693 /* This helper checks if the socket has IP_TRANSPARENT set */
1694 static inline bool inet_sk_transparent(const struct sock *sk)
1695 {
1696         switch (sk->sk_state) {
1697         case TCP_TIME_WAIT:
1698                 return inet_twsk(sk)->tw_transparent;
1699         case TCP_NEW_SYN_RECV:
1700                 return inet_rsk(inet_reqsk(sk))->no_srccheck;
1701         }
1702         return inet_sk(sk)->transparent;
1703 }
1704 
1705 /* Determines whether this is a thin stream (which may suffer increased
1706  * latency, since < 4 packets in flight cannot yield the 3 dupACKs needed
1707  * for fast retransmit). Used to trigger latency-reducing mechanisms. */
1708 static inline bool tcp_stream_is_thin(struct tcp_sock *tp)
1709 {
1710         return tp->packets_out < 4 && !tcp_in_initial_slowstart(tp);
1711 }
1712 
1713 /* /proc */
1714 enum tcp_seq_states {
1715         TCP_SEQ_STATE_LISTENING,
1716         TCP_SEQ_STATE_ESTABLISHED,
1717 };
1718 
1719 int tcp_seq_open(struct inode *inode, struct file *file);
1720 
1721 struct tcp_seq_afinfo {
1722         char                            *name;
1723         sa_family_t                     family;
1724         const struct file_operations    *seq_fops;
1725         struct seq_operations           seq_ops;
1726 };
1727 
1728 struct tcp_iter_state {
1729         struct seq_net_private  p;
1730         sa_family_t             family;
1731         enum tcp_seq_states     state;
1732         struct sock             *syn_wait_sk;
1733         int                     bucket, offset, sbucket, num;
1734         loff_t                  last_pos;
1735 };
1736 
1737 int tcp_proc_register(struct net *net, struct tcp_seq_afinfo *afinfo);
1738 void tcp_proc_unregister(struct net *net, struct tcp_seq_afinfo *afinfo);
1739 
1740 extern struct request_sock_ops tcp_request_sock_ops;
1741 extern struct request_sock_ops tcp6_request_sock_ops;
1742 
1743 void tcp_v4_destroy_sock(struct sock *sk);
1744 
1745 struct sk_buff *tcp_gso_segment(struct sk_buff *skb,
1746                                 netdev_features_t features);
1747 struct sk_buff **tcp_gro_receive(struct sk_buff **head, struct sk_buff *skb);
1748 int tcp_gro_complete(struct sk_buff *skb);
1749 
1750 void __tcp_v4_send_check(struct sk_buff *skb, __be32 saddr, __be32 daddr);
1751 
1752 static inline u32 tcp_notsent_lowat(const struct tcp_sock *tp)
1753 {
1754         struct net *net = sock_net((struct sock *)tp);
1755         return tp->notsent_lowat ?: net->ipv4.sysctl_tcp_notsent_lowat;
1756 }
1757 
1758 static inline bool tcp_stream_memory_free(const struct sock *sk)
1759 {
1760         const struct tcp_sock *tp = tcp_sk(sk);
1761         u32 notsent_bytes = tp->write_seq - tp->snd_nxt;
1762 
1763         return notsent_bytes < tcp_notsent_lowat(tp);
1764 }
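tcp_stream_memory_free() is the predicate behind TCP_NOTSENT_LOWAT: once unsent bytes reach the threshold, poll()/epoll stop reporting the socket writable until the backlog drains. A userspace sketch (the 128 KB value is an arbitrary example):

#include <netinet/in.h>
#include <netinet/tcp.h>	/* TCP_NOTSENT_LOWAT on recent libcs */
#include <sys/socket.h>

static int set_notsent_lowat(int fd)
{
	int lowat = 128 * 1024;

	return setsockopt(fd, IPPROTO_TCP, TCP_NOTSENT_LOWAT,
			  &lowat, sizeof(lowat));
}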
1765 
1766 #ifdef CONFIG_PROC_FS
1767 int tcp4_proc_init(void);
1768 void tcp4_proc_exit(void);
1769 #endif
1770 
1771 int tcp_rtx_synack(const struct sock *sk, struct request_sock *req);
1772 int tcp_conn_request(struct request_sock_ops *rsk_ops,
1773                      const struct tcp_request_sock_ops *af_ops,
1774                      struct sock *sk, struct sk_buff *skb);
1775 
1776 /* TCP af-specific functions */
1777 struct tcp_sock_af_ops {
1778 #ifdef CONFIG_TCP_MD5SIG
1779         struct tcp_md5sig_key   *(*md5_lookup) (const struct sock *sk,
1780                                                 const struct sock *addr_sk);
1781         int             (*calc_md5_hash)(char *location,
1782                                          const struct tcp_md5sig_key *md5,
1783                                          const struct sock *sk,
1784                                          const struct sk_buff *skb);
1785         int             (*md5_parse)(struct sock *sk,
1786                                      char __user *optval,
1787                                      int optlen);
1788 #endif
1789 };
1790 
1791 struct tcp_request_sock_ops {
1792         u16 mss_clamp;
1793 #ifdef CONFIG_TCP_MD5SIG
1794         struct tcp_md5sig_key *(*req_md5_lookup)(const struct sock *sk,
1795                                                  const struct sock *addr_sk);
1796         int             (*calc_md5_hash) (char *location,
1797                                           const struct tcp_md5sig_key *md5,
1798                                           const struct sock *sk,
1799                                           const struct sk_buff *skb);
1800 #endif
1801         void (*init_req)(struct request_sock *req,
1802                          const struct sock *sk_listener,
1803                          struct sk_buff *skb);
1804 #ifdef CONFIG_SYN_COOKIES
1805         __u32 (*cookie_init_seq)(const struct sk_buff *skb,
1806                                  __u16 *mss);
1807 #endif
1808         struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
1809                                        const struct request_sock *req,
1810                                        bool *strict);
1811         __u32 (*init_seq)(const struct sk_buff *skb);
1812         int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
1813                            struct flowi *fl, struct request_sock *req,
1814                            struct tcp_fastopen_cookie *foc,
1815                            enum tcp_synack_type synack_type);
1816 };
1817 
1818 #ifdef CONFIG_SYN_COOKIES
1819 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1820                                          const struct sock *sk, struct sk_buff *skb,
1821                                          __u16 *mss)
1822 {
1823         tcp_synq_overflow(sk);
1824         __NET_INC_STATS(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT);
1825         return ops->cookie_init_seq(skb, mss);
1826 }
1827 #else
1828 static inline __u32 cookie_init_sequence(const struct tcp_request_sock_ops *ops,
1829                                          const struct sock *sk, struct sk_buff *skb,
1830                                          __u16 *mss)
1831 {
1832         return 0;
1833 }
1834 #endif
1835 
1836 int tcpv4_offload_init(void);
1837 
1838 void tcp_v4_init(void);
1839 void tcp_init(void);
1840 
1841 /* tcp_recovery.c */
1842 
1843 /* Flags to enable various loss recovery features. See below */
1844 extern int sysctl_tcp_recovery;
1845 
1846 /* Use TCP RACK to detect (some) tail and retransmit losses */
1847 #define TCP_RACK_LOST_RETRANS  0x1
1848 
1849 extern int tcp_rack_mark_lost(struct sock *sk);
1850 
1851 extern void tcp_rack_advance(struct tcp_sock *tp,
1852                              const struct skb_mstamp *xmit_time, u8 sacked);
1853 
1854 /*
1855  * Save and compile IPv4 options; return a pointer to them.
1856  */
1857 static inline struct ip_options_rcu *tcp_v4_save_options(struct sk_buff *skb)
1858 {
1859         const struct ip_options *opt = &TCP_SKB_CB(skb)->header.h4.opt;
1860         struct ip_options_rcu *dopt = NULL;
1861 
1862         if (opt->optlen) {
1863                 int opt_size = sizeof(*dopt) + opt->optlen;
1864 
1865                 dopt = kmalloc(opt_size, GFP_ATOMIC);
1866                 if (dopt && __ip_options_echo(&dopt->opt, skb, opt)) {
1867                         kfree(dopt);
1868                         dopt = NULL;
1869                 }
1870         }
1871         return dopt;
1872 }
1873 
1874 /* Locally generated TCP pure ACKs have skb->truesize == 2
1875  * (see tcp_send_ack() in net/ipv4/tcp_output.c).
1876  * Checking truesize is much faster than dissecting the packet to find
1877  * out (think of GRE encapsulations, IPv4, IPv6, ...).
1878  */
1879 static inline bool skb_is_tcp_pure_ack(const struct sk_buff *skb)
1880 {
1881         return skb->truesize == 2;
1882 }
1883 
1884 static inline void skb_set_tcp_pure_ack(struct sk_buff *skb)
1885 {
1886         skb->truesize = 2;
1887 }
1888 
1889 static inline int tcp_inq(struct sock *sk)
1890 {
1891         struct tcp_sock *tp = tcp_sk(sk);
1892         int answ;
1893 
1894         if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV)) {
1895                 answ = 0;
1896         } else if (sock_flag(sk, SOCK_URGINLINE) ||
1897                    !tp->urg_data ||
1898                    before(tp->urg_seq, tp->copied_seq) ||
1899                    !before(tp->urg_seq, tp->rcv_nxt)) {
1900 
1901                 answ = tp->rcv_nxt - tp->copied_seq;
1902 
1903                 /* Subtract 1, if FIN was received */
1904                 if (answ && sock_flag(sk, SOCK_DONE))
1905                         answ--;
1906         } else {
1907                 answ = tp->urg_seq - tp->copied_seq;
1908         }
1909 
1910         return answ;
1911 }
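This is the computation behind the SIOCINQ ioctl (FIONREAD on TCP sockets). A userspace sketch:

#include <sys/ioctl.h>
#include <linux/sockios.h>	/* SIOCINQ */

static int bytes_readable(int fd)
{
	int n = 0;

	return ioctl(fd, SIOCINQ, &n) < 0 ? -1 : n;
}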
1912 
1913 int tcp_peek_len(struct socket *sock);
1914 
1915 static inline void tcp_segs_in(struct tcp_sock *tp, const struct sk_buff *skb)
1916 {
1917         u16 segs_in;
1918 
1919         segs_in = max_t(u16, 1, skb_shinfo(skb)->gso_segs);
1920         tp->segs_in += segs_in;
1921         if (skb->len > tcp_hdrlen(skb))
1922                 tp->data_segs_in += segs_in;
1923 }
1924 
1925 /*
1926  * The TCP listen path runs lockless.
1927  * We forced "struct sock" to be const qualified to make sure
1928  * we don't modify one of its fields by mistake.
1929  * Here, we only increment sk_drops, which is an atomic_t, so it is
1930  * safe to cast away the const and make the sock writable again.
1931  */
1932 static inline void tcp_listendrop(const struct sock *sk)
1933 {
1934         atomic_inc(&((struct sock *)sk)->sk_drops);
1935         __NET_INC_STATS(sock_net(sk), LINUX_MIB_LISTENDROPS);
1936 }
1937 
1938 #endif  /* _TCP_H */
1939 
