Linux/net/ipv4/tcp_westwood.c

/*
 * TCP Westwood+: end-to-end bandwidth estimation for TCP
 *
 *      Angelo Dell'Aera: author of the first version of TCP Westwood+ in Linux 2.4
 *
 * Support at http://c3lab.poliba.it/index.php/Westwood
 * Main references in literature:
 *
 * - S. Mascolo, C. Casetti, M. Gerla et al.,
 *   "TCP Westwood: bandwidth estimation for TCP", Proc. ACM Mobicom 2001
 *
 * - L. Grieco, S. Mascolo,
 *   "Performance evaluation of New Reno, Vegas, Westwood+ TCP", ACM Computer
 *   Comm. Review, 2004
 *
 * - A. Dell'Aera, L. Grieco, S. Mascolo,
 *   "Linux 2.4 Implementation of Westwood+ TCP with Rate-Halving:
 *    A Performance Evaluation Over the Internet" (ICC 2004), Paris, June 2004
 *
 * Westwood+ employs end-to-end bandwidth measurement to set cwnd and
 * ssthresh after packet loss. The probing phase is the same as in the
 * original Reno.
 */
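
/*
 * Editor's note (a sketch of the algorithm, summarizing the functions
 * below rather than quoting the original source). The core quantities
 * maintained by this module are:
 *
 *   sample = bk / delta          bytes acked per sampling window
 *   bw_est = lowpass(sample)     see westwood_do_filter()
 *
 * and, after a loss event:
 *
 *   ssthresh = max(2, bw_est * rtt_min / mss)
 *
 * This "adaptive decrease" replaces Reno's blind window halving; see
 * tcp_westwood_bw_rttmin() below.
 */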

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/inet_diag.h>
#include <net/tcp.h>

/* TCP Westwood structure */
struct westwood {
        u32    bw_ns_est;        /* first bandwidth estimate, only lightly smoothed */
        u32    bw_est;           /* smoothed bandwidth estimate */
        u32    rtt_win_sx;       /* start (left edge) of the current sampling window */
        u32    bk;               /* bytes acked during the current window */
        u32    snd_una;          /* used for evaluating the number of acked bytes */
        u32    cumul_ack;        /* bytes acked by the current ACK */
        u32    accounted;        /* bytes already credited during dupacks */
        u32    rtt;              /* last RTT sample, in jiffies */
        u32    rtt_min;          /* minimum observed RTT */
        u8     first_ack;        /* flag set until the first ACK is processed */
        u8     reset_rtt_min;    /* reset rtt_min to the next RTT sample */
};

/* TCP Westwood functions and constants */
#define TCP_WESTWOOD_RTT_MIN   (HZ/20)  /* 50ms */
#define TCP_WESTWOOD_INIT_RTT  (20*HZ)  /* maybe too conservative?! */

/*
 * @tcp_westwood_init
 * This function initializes the fields used by TCP Westwood+. It is
 * called after the initial SYN, so the sequence numbers are correct,
 * but for new passive connections we have no information about RTTmin
 * yet, so we simply set it to TCP_WESTWOOD_INIT_RTT. This value was
 * deliberately chosen to be overly conservative, so that it is certain
 * to be updated in a consistent way as soon as possible. That will
 * reasonably happen within the first RTT of the connection's lifetime.
 */
static void tcp_westwood_init(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);

        w->bk = 0;
        w->bw_ns_est = 0;
        w->bw_est = 0;
        w->accounted = 0;
        w->cumul_ack = 0;
        w->reset_rtt_min = 1;
        w->rtt_min = w->rtt = TCP_WESTWOOD_INIT_RTT;
        w->rtt_win_sx = tcp_time_stamp;
        w->snd_una = tcp_sk(sk)->snd_una;
        w->first_ack = 1;
}

/*
 * @westwood_do_filter
 * Low-pass filter. Implemented using constant coefficients.
 */
static inline u32 westwood_do_filter(u32 a, u32 b)
{
        return ((7 * a) + b) >> 3;
}
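
/*
 * Editor's note: westwood_do_filter() is an exponentially weighted
 * moving average with gain 1/8, i.e. out = a + (b - a)/8. Hypothetical
 * worked example: with a = 800 and b = 1600 the result is
 * (7*800 + 1600) >> 3 = 7200 >> 3 = 900, so the estimate moves one
 * eighth of the way toward the new sample.
 */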

static void westwood_filter(struct westwood *w, u32 delta)
{
        /* If the filter is empty fill it with the first sample of bandwidth */
        if (w->bw_ns_est == 0 && w->bw_est == 0) {
                w->bw_ns_est = w->bk / delta;
                w->bw_est = w->bw_ns_est;
        } else {
                w->bw_ns_est = westwood_do_filter(w->bw_ns_est, w->bk / delta);
                w->bw_est = westwood_do_filter(w->bw_est, w->bw_ns_est);
        }
}
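
/*
 * Editor's note on units: bk is in bytes and delta in jiffies, so
 * bw_ns_est and bw_est are in bytes per jiffy. Assuming HZ = 1000
 * purely for illustration, bw_est = 1500 corresponds to 1500 bytes/ms,
 * i.e. roughly 12 Mbit/s.
 */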

/*
 * @tcp_westwood_pkts_acked
 * Called after processing a group of packets, but all Westwood needs
 * is the last RTT sample, delivered here in microseconds and converted
 * to jiffies.
 */
static void tcp_westwood_pkts_acked(struct sock *sk, u32 cnt, s32 rtt)
{
        struct westwood *w = inet_csk_ca(sk);

        if (rtt > 0)
                w->rtt = usecs_to_jiffies(rtt);
}

/*
 * @westwood_update_window
 * Updates the RTT evaluation window if it is the right moment to do
 * so; if so, it calls the filter to evaluate bandwidth.
 */
static void westwood_update_window(struct sock *sk)
{
        struct westwood *w = inet_csk_ca(sk);
        s32 delta = tcp_time_stamp - w->rtt_win_sx;

        /* Initialize w->snd_una with the first acked sequence number in order
         * to fix the mismatch between tp->snd_una and w->snd_una for the first
         * bandwidth sample
         */
        if (w->first_ack) {
                w->snd_una = tcp_sk(sk)->snd_una;
                w->first_ack = 0;
        }

        /*
         * See if an RTT window has passed.
         * Be careful: if the RTT is less than 50 ms we don't filter
         * but keep building up the sample, since bandwidth estimates
         * taken over very short intervals are unreliable.
         * Obviously on a LAN we will therefore typically have
         * right_bound = left_bound + TCP_WESTWOOD_RTT_MIN.
         */
        if (w->rtt && delta > max_t(u32, w->rtt, TCP_WESTWOOD_RTT_MIN)) {
                westwood_filter(w, delta);

                w->bk = 0;
                w->rtt_win_sx = tcp_time_stamp;
        }
}
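
/*
 * Editor's note, hypothetical numbers: with HZ = 1000 and a measured
 * w->rtt of 80 jiffies (80 ms), the window closes once delta exceeds
 * max(80, 50) = 80 jiffies; with w->rtt = 20 jiffies, the 50 ms floor
 * (TCP_WESTWOOD_RTT_MIN) applies instead.
 */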

static inline void update_rtt_min(struct westwood *w)
{
        if (w->reset_rtt_min) {
                w->rtt_min = w->rtt;
                w->reset_rtt_min = 0;
        } else
                w->rtt_min = min(w->rtt, w->rtt_min);
}

/*
 * @westwood_fast_bw
 * Called when we are in the fast path, i.e. when header prediction
 * succeeds. In that case the update is straightforward and needs no
 * particular care.
 */
static inline void westwood_fast_bw(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        westwood_update_window(sk);

        w->bk += tp->snd_una - w->snd_una;
        w->snd_una = tp->snd_una;
        update_rtt_min(w);
}

/*
 * @westwood_acked_count
 * Evaluates cumul_ack, the number of bytes to credit to bk, taking
 * delayed, partial and duplicate ACKs into account.
 */
static inline u32 westwood_acked_count(struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        w->cumul_ack = tp->snd_una - w->snd_una;

        /* If cumul_ack is 0 this is a dupack, since it is not moving
         * tp->snd_una.
         */
        if (!w->cumul_ack) {
                w->accounted += tp->mss_cache;
                w->cumul_ack = tp->mss_cache;
        }

        if (w->cumul_ack > tp->mss_cache) {
                /* Partial or delayed ack */
                if (w->accounted >= w->cumul_ack) {
                        w->accounted -= w->cumul_ack;
                        w->cumul_ack = tp->mss_cache;
                } else {
                        w->cumul_ack -= w->accounted;
                        w->accounted = 0;
                }
        }

        w->snd_una = tp->snd_una;

        return w->cumul_ack;
}
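
/*
 * Editor's note, hypothetical trace with mss_cache = 1000: three
 * dupacks each credit one mss (accounted grows to 3000 while cumul_ack
 * is pinned at 1000); a later cumulative ACK advancing snd_una by 5000
 * bytes then yields cumul_ack = 5000 - 3000 = 2000 and accounted = 0,
 * so bytes already credited during the dupacks are not counted twice
 * in bk.
 */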

/*
 * TCP Westwood
 * The limit is evaluated as the bandwidth estimate times RTTmin
 * (divided by mss_cache to express it in packets). The result is
 * clamped to be >= 2, so this never returns 0.
 */
static u32 tcp_westwood_bw_rttmin(const struct sock *sk)
{
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct westwood *w = inet_csk_ca(sk);

        return max_t(u32, (w->bw_est * w->rtt_min) / tp->mss_cache, 2);
}
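
/*
 * Editor's note, hypothetical numbers with HZ = 1000: bw_est = 1500
 * bytes/jiffy (~12 Mbit/s), rtt_min = 100 jiffies (100 ms) and
 * mss_cache = 1500 give (1500 * 100) / 1500 = 100 packets, i.e. the
 * estimated bandwidth-delay product expressed in segments.
 */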

static void tcp_westwood_ack(struct sock *sk, u32 ack_flags)
{
        if (ack_flags & CA_ACK_SLOWPATH) {
                struct westwood *w = inet_csk_ca(sk);

                westwood_update_window(sk);
                w->bk += westwood_acked_count(sk);

                update_rtt_min(w);
                return;
        }

        westwood_fast_bw(sk);
}

static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
{
        struct tcp_sock *tp = tcp_sk(sk);
        struct westwood *w = inet_csk_ca(sk);

        switch (event) {
        case CA_EVENT_COMPLETE_CWR:
                tp->snd_cwnd = tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                break;
        case CA_EVENT_LOSS:
                tp->snd_ssthresh = tcp_westwood_bw_rttmin(sk);
                /* Update RTT_min when next ack arrives */
                w->reset_rtt_min = 1;
                break;
        default:
                /* don't care */
                break;
        }
}
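
/*
 * Editor's note: this event handler is where Westwood+ departs from
 * Reno. On CA_EVENT_COMPLETE_CWR and CA_EVENT_LOSS the window is set
 * from the estimated bandwidth-delay product rather than blindly
 * halved, while slow start and congestion avoidance (see the ops
 * struct below) still reuse the Reno routines.
 */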

/* Extract info for TCP socket info provided via netlink. */
static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
                                union tcp_cc_info *info)
{
        const struct westwood *ca = inet_csk_ca(sk);

        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                info->vegas.tcpv_enabled = 1;
                info->vegas.tcpv_rttcnt = 0;
                info->vegas.tcpv_rtt    = jiffies_to_usecs(ca->rtt);
                info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);

                *attr = INET_DIAG_VEGASINFO;
                return sizeof(struct tcpvegas_info);
        }
        return 0;
}

static struct tcp_congestion_ops tcp_westwood __read_mostly = {
        .init           = tcp_westwood_init,
        .ssthresh       = tcp_reno_ssthresh,
        .cong_avoid     = tcp_reno_cong_avoid,
        .cwnd_event     = tcp_westwood_event,
        .in_ack_event   = tcp_westwood_ack,
        .get_info       = tcp_westwood_info,
        .pkts_acked     = tcp_westwood_pkts_acked,

        .owner          = THIS_MODULE,
        .name           = "westwood"
};

static int __init tcp_westwood_register(void)
{
        BUILD_BUG_ON(sizeof(struct westwood) > ICSK_CA_PRIV_SIZE);
        return tcp_register_congestion_control(&tcp_westwood);
}

static void __exit tcp_westwood_unregister(void)
{
        tcp_unregister_congestion_control(&tcp_westwood);
}

module_init(tcp_westwood_register);
module_exit(tcp_westwood_unregister);

MODULE_AUTHOR("Stephen Hemminger, Angelo Dell'Aera");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("TCP Westwood+");
