Linux/net/sched/sch_htb.c

  1 /*
  2  * net/sched/sch_htb.c  Hierarchical token bucket, feed tree version
  3  *
  4  *              This program is free software; you can redistribute it and/or
  5  *              modify it under the terms of the GNU General Public License
  6  *              as published by the Free Software Foundation; either version
  7  *              2 of the License, or (at your option) any later version.
  8  *
  9  * Authors:     Martin Devera, <devik@cdi.cz>
 10  *
 11  * Credits (in time order) for older HTB versions:
 12  *              Stef Coene <stef.coene@docum.org>
 13  *                      HTB support at LARTC mailing list
 14  *              Ondrej Kraus, <krauso@barr.cz>
 15  *                      found missing INIT_QDISC(htb)
 16  *              Vladimir Smelhaus, Aamer Akhter, Bert Hubert
 17  *                      helped a lot to locate nasty class stall bug
 18  *              Andi Kleen, Jamal Hadi, Bert Hubert
 19  *                      code review and helpful comments on shaping
 20  *              Tomasz Wrona, <tw@eter.tym.pl>
 21  *                      created test case so that I was able to fix nasty bug
 22  *              Wilfried Weissmann
 23  *                      spotted bug in dequeue code and helped with fix
 24  *              Jiri Fojtasek
 25  *                      fixed requeue routine
 26  *              and many others. thanks.
 27  */
 28 #include <linux/module.h>
 29 #include <linux/moduleparam.h>
 30 #include <linux/types.h>
 31 #include <linux/kernel.h>
 32 #include <linux/string.h>
 33 #include <linux/errno.h>
 34 #include <linux/skbuff.h>
 35 #include <linux/list.h>
 36 #include <linux/compiler.h>
 37 #include <linux/rbtree.h>
 38 #include <linux/workqueue.h>
 39 #include <linux/slab.h>
 40 #include <net/netlink.h>
 41 #include <net/sch_generic.h>
 42 #include <net/pkt_sched.h>
 43 
 44 /* HTB algorithm.
 45     Author: devik@cdi.cz
 46     ========================================================================
  47     HTB is like TBF with multiple classes. It is also similar to CBQ because
  48     it allows assigning a priority to each class in the hierarchy.
 49     In fact it is another implementation of Floyd's formal sharing.
 50 
 51     Levels:
  52     Each class is assigned a level. Leaves ALWAYS have level 0 and root
  53     classes have level TC_HTB_MAXDEPTH-1. Interior nodes have a level
  54     one less than their parent.
 55 */
 56 
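/* Editor's illustrative sketch (not part of the original file): the level
 * invariant above means a child always sits one level below its parent,
 * with leaves bottoming out at 0; with TC_HTB_MAXDEPTH == 8, a root class
 * is at level 7.  The helper name is hypothetical - the real computation
 * lives in htb_change_class() further below.
 */
static inline int htb_example_child_level(int parent_level)
{
	return parent_level - 1;	/* leaves always end at level 0 */
}
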
 57 static int htb_hysteresis __read_mostly = 0; /* whether to use mode hysteresis for speedup */
  58 #define HTB_VER 0x30011         /* major must be matched with number supplied by TC as version */
 59 
 60 #if HTB_VER >> 16 != TC_HTB_PROTOVER
 61 #error "Mismatched sch_htb.c and pkt_sch.h"
 62 #endif
 63 
 64 /* Module parameter and sysfs export */
 65 module_param    (htb_hysteresis, int, 0640);
 66 MODULE_PARM_DESC(htb_hysteresis, "Hysteresis mode, less CPU load, less accurate");
 67 
  68 static int htb_rate_est = 0; /* whether htb classes get a default rate estimator */
 69 module_param(htb_rate_est, int, 0640);
 70 MODULE_PARM_DESC(htb_rate_est, "setup a default rate estimator (4sec 16sec) for htb classes");
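
/* Editor's note (illustrative): both knobs above can be set at module load
 * time, e.g. "modprobe sch_htb htb_hysteresis=1", or toggled at runtime via
 * /sys/module/sch_htb/parameters/ thanks to the 0640 permissions.
 */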
 71 
  72 /* used internally to keep status of a single class */
 73 enum htb_cmode {
 74         HTB_CANT_SEND,          /* class can't send and can't borrow */
 75         HTB_MAY_BORROW,         /* class can't send but may borrow */
 76         HTB_CAN_SEND            /* class can send */
 77 };
 78 
 79 struct htb_prio {
 80         union {
 81                 struct rb_root  row;
 82                 struct rb_root  feed;
 83         };
 84         struct rb_node  *ptr;
  85         /* When a class changes from state 1->2 and disconnects from
  86          * its parent's feed, we lose the ptr value and start from the
  87          * first child again. Here we store the classid of the
  88          * last valid ptr (used when ptr is NULL).
 89          */
 90         u32             last_ptr_id;
 91 };
 92 
 93 /* interior & leaf nodes; props specific to leaves are marked L:
 94  * To reduce false sharing, place mostly read fields at beginning,
 95  * and mostly written ones at the end.
 96  */
 97 struct htb_class {
 98         struct Qdisc_class_common common;
 99         struct psched_ratecfg   rate;
100         struct psched_ratecfg   ceil;
101         s64                     buffer, cbuffer;/* token bucket depth/rate */
102         s64                     mbuffer;        /* max wait time */
103         u32                     prio;           /* these two are used only by leaves... */
104         int                     quantum;        /* but stored for parent-to-leaf return */
105 
106         struct tcf_proto __rcu  *filter_list;   /* class attached filters */
107         int                     filter_cnt;
108         int                     refcnt;         /* usage count of this class */
109 
110         int                     level;          /* our level (see above) */
111         unsigned int            children;
112         struct htb_class        *parent;        /* parent class */
113 
114         struct gnet_stats_rate_est64 rate_est;
115 
116         /*
117          * Written often fields
118          */
119         struct gnet_stats_basic_packed bstats;
120         struct gnet_stats_queue qstats;
121         struct tc_htb_xstats    xstats; /* our special stats */
122 
123         /* token bucket parameters */
124         s64                     tokens, ctokens;/* current number of tokens */
125         s64                     t_c;            /* checkpoint time */
126 
127         union {
128                 struct htb_class_leaf {
129                         struct list_head drop_list;
130                         int             deficit[TC_HTB_MAXDEPTH];
131                         struct Qdisc    *q;
132                 } leaf;
133                 struct htb_class_inner {
134                         struct htb_prio clprio[TC_HTB_NUMPRIO];
135                 } inner;
136         } un;
137         s64                     pq_key;
138 
139         int                     prio_activity;  /* for which prios are we active */
140         enum htb_cmode          cmode;          /* current mode of the class */
141         struct rb_node          pq_node;        /* node for event queue */
142         struct rb_node          node[TC_HTB_NUMPRIO];   /* node for self or feed tree */
143 };
144 
145 struct htb_level {
146         struct rb_root  wait_pq;
147         struct htb_prio hprio[TC_HTB_NUMPRIO];
148 };
149 
150 struct htb_sched {
151         struct Qdisc_class_hash clhash;
 152         int                     defcls;         /* class where unclassified flows go */
153         int                     rate2quantum;   /* quant = rate / rate2quantum */
154 
155         /* filters for qdisc itself */
156         struct tcf_proto __rcu  *filter_list;
157 
158 #define HTB_WARN_TOOMANYEVENTS  0x1
159         unsigned int            warned; /* only one warning */
160         int                     direct_qlen;
161         struct work_struct      work;
162 
163         /* non shaped skbs; let them go directly thru */
164         struct sk_buff_head     direct_queue;
165         long                    direct_pkts;
166 
167         struct qdisc_watchdog   watchdog;
168 
169         s64                     now;    /* cached dequeue time */
170         struct list_head        drops[TC_HTB_NUMPRIO];/* active leaves (for drops) */
171 
172         /* time of nearest event per level (row) */
173         s64                     near_ev_cache[TC_HTB_MAXDEPTH];
174 
175         int                     row_mask[TC_HTB_MAXDEPTH];
176 
177         struct htb_level        hlevel[TC_HTB_MAXDEPTH];
178 };
179 
180 /* find class in global hash table using given handle */
181 static inline struct htb_class *htb_find(u32 handle, struct Qdisc *sch)
182 {
183         struct htb_sched *q = qdisc_priv(sch);
184         struct Qdisc_class_common *clc;
185 
186         clc = qdisc_class_find(&q->clhash, handle);
187         if (clc == NULL)
188                 return NULL;
189         return container_of(clc, struct htb_class, common);
190 }
191 
192 /**
193  * htb_classify - classify a packet into class
194  *
 195  * It returns NULL if the packet should be dropped or -1 if the packet
 196  * should be passed directly thru. In all other cases a leaf class is returned.
 197  * We allow direct class selection by putting a classid into skb->priority.
 198  * Then we examine filters in the qdisc and in inner nodes (if a higher-level
 199  * filter points to an inner node). If we end up with classid MAJOR:0 we
 200  * enqueue the skb into the special internal fifo (direct); these packets
 201  * then go directly thru. If we still have no valid leaf we try to use the
 202  * MAJOR:default leaf. If still unsuccessful, we finish and return the direct queue.
203  */
204 #define HTB_DIRECT ((struct htb_class *)-1L)
205 
206 static struct htb_class *htb_classify(struct sk_buff *skb, struct Qdisc *sch,
207                                       int *qerr)
208 {
209         struct htb_sched *q = qdisc_priv(sch);
210         struct htb_class *cl;
211         struct tcf_result res;
212         struct tcf_proto *tcf;
213         int result;
214 
 215         /* allow selecting a class by setting skb->priority to a valid classid;
216          * note that nfmark can be used too by attaching filter fw with no
217          * rules in it
218          */
219         if (skb->priority == sch->handle)
220                 return HTB_DIRECT;      /* X:0 (direct flow) selected */
221         cl = htb_find(skb->priority, sch);
222         if (cl) {
223                 if (cl->level == 0)
224                         return cl;
225                 /* Start with inner filter chain if a non-leaf class is selected */
226                 tcf = rcu_dereference_bh(cl->filter_list);
227         } else {
228                 tcf = rcu_dereference_bh(q->filter_list);
229         }
230 
231         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
232         while (tcf && (result = tc_classify(skb, tcf, &res, false)) >= 0) {
233 #ifdef CONFIG_NET_CLS_ACT
234                 switch (result) {
235                 case TC_ACT_QUEUED:
236                 case TC_ACT_STOLEN:
237                         *qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
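                        /* fall through */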
238                 case TC_ACT_SHOT:
239                         return NULL;
240                 }
241 #endif
242                 cl = (void *)res.class;
243                 if (!cl) {
244                         if (res.classid == sch->handle)
245                                 return HTB_DIRECT;      /* X:0 (direct flow) */
246                         cl = htb_find(res.classid, sch);
247                         if (!cl)
248                                 break;  /* filter selected invalid classid */
249                 }
250                 if (!cl->level)
251                         return cl;      /* we hit leaf; return it */
252 
253                 /* we have got inner class; apply inner filter chain */
254                 tcf = rcu_dereference_bh(cl->filter_list);
255         }
256         /* classification failed; try to use default class */
257         cl = htb_find(TC_H_MAKE(TC_H_MAJ(sch->handle), q->defcls), sch);
258         if (!cl || cl->level)
259                 return HTB_DIRECT;      /* bad default .. this is safe bet */
260         return cl;
261 }
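
/* Editor's illustrative sketch (userspace, not part of this file): as noted
 * above htb_classify(), a socket can steer packets to an HTB leaf with no
 * filters at all by writing the full classid into skb->priority.  The fd and
 * the 1:10 class below are hypothetical.
 *
 *	#include <sys/socket.h>
 *
 *	unsigned int classid = 0x00010010;  // tc class 1:10 (minor is hex)
 *	setsockopt(fd, SOL_SOCKET, SO_PRIORITY, &classid, sizeof(classid));
 */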
262 
263 /**
264  * htb_add_to_id_tree - adds class to the round robin list
265  *
 266  * The routine adds the class to a list (actually a tree) sorted by classid.
 267  * Make sure that the class is not already on such a list for the given prio.
268  */
269 static void htb_add_to_id_tree(struct rb_root *root,
270                                struct htb_class *cl, int prio)
271 {
272         struct rb_node **p = &root->rb_node, *parent = NULL;
273 
274         while (*p) {
275                 struct htb_class *c;
276                 parent = *p;
277                 c = rb_entry(parent, struct htb_class, node[prio]);
278 
279                 if (cl->common.classid > c->common.classid)
280                         p = &parent->rb_right;
281                 else
282                         p = &parent->rb_left;
283         }
284         rb_link_node(&cl->node[prio], parent, p);
285         rb_insert_color(&cl->node[prio], root);
286 }
287 
288 /**
289  * htb_add_to_wait_tree - adds class to the event queue with delay
290  *
 291  * The class is added to the priority event queue to indicate that the class
 292  * will change its mode at time cl->pq_key (expressed in nanoseconds). Make
 293  * sure that the class is not already in the queue.
294  */
295 static void htb_add_to_wait_tree(struct htb_sched *q,
296                                  struct htb_class *cl, s64 delay)
297 {
298         struct rb_node **p = &q->hlevel[cl->level].wait_pq.rb_node, *parent = NULL;
299 
300         cl->pq_key = q->now + delay;
301         if (cl->pq_key == q->now)
302                 cl->pq_key++;
303 
304         /* update the nearest event cache */
305         if (q->near_ev_cache[cl->level] > cl->pq_key)
306                 q->near_ev_cache[cl->level] = cl->pq_key;
307 
308         while (*p) {
309                 struct htb_class *c;
310                 parent = *p;
311                 c = rb_entry(parent, struct htb_class, pq_node);
312                 if (cl->pq_key >= c->pq_key)
313                         p = &parent->rb_right;
314                 else
315                         p = &parent->rb_left;
316         }
317         rb_link_node(&cl->pq_node, parent, p);
318         rb_insert_color(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
319 }
320 
321 /**
322  * htb_next_rb_node - finds next node in binary tree
323  *
 324  * When we are past the last key, *n is set to NULL.
325  * Average complexity is 2 steps per call.
326  */
327 static inline void htb_next_rb_node(struct rb_node **n)
328 {
329         *n = rb_next(*n);
330 }
331 
332 /**
333  * htb_add_class_to_row - add class to its row
334  *
335  * The class is added to row at priorities marked in mask.
336  * It does nothing if mask == 0.
337  */
338 static inline void htb_add_class_to_row(struct htb_sched *q,
339                                         struct htb_class *cl, int mask)
340 {
341         q->row_mask[cl->level] |= mask;
342         while (mask) {
343                 int prio = ffz(~mask);
344                 mask &= ~(1 << prio);
345                 htb_add_to_id_tree(&q->hlevel[cl->level].hprio[prio].row, cl, prio);
346         }
347 }
348 
349 /* If this triggers, it is a bug in this code, but it need not be fatal */
350 static void htb_safe_rb_erase(struct rb_node *rb, struct rb_root *root)
351 {
352         if (RB_EMPTY_NODE(rb)) {
353                 WARN_ON(1);
354         } else {
355                 rb_erase(rb, root);
356                 RB_CLEAR_NODE(rb);
357         }
358 }
359 
360 
361 /**
362  * htb_remove_class_from_row - removes class from its row
363  *
364  * The class is removed from row at priorities marked in mask.
365  * It does nothing if mask == 0.
366  */
367 static inline void htb_remove_class_from_row(struct htb_sched *q,
368                                                  struct htb_class *cl, int mask)
369 {
370         int m = 0;
371         struct htb_level *hlevel = &q->hlevel[cl->level];
372 
373         while (mask) {
374                 int prio = ffz(~mask);
375                 struct htb_prio *hprio = &hlevel->hprio[prio];
376 
377                 mask &= ~(1 << prio);
378                 if (hprio->ptr == cl->node + prio)
379                         htb_next_rb_node(&hprio->ptr);
380 
381                 htb_safe_rb_erase(cl->node + prio, &hprio->row);
382                 if (!hprio->row.rb_node)
383                         m |= 1 << prio;
384         }
385         q->row_mask[cl->level] &= ~m;
386 }
387 
388 /**
 389  * htb_activate_prios - creates active class's feed chain
390  *
391  * The class is connected to ancestors and/or appropriate rows
392  * for priorities it is participating on. cl->cmode must be new
393  * (activated) mode. It does nothing if cl->prio_activity == 0.
394  */
395 static void htb_activate_prios(struct htb_sched *q, struct htb_class *cl)
396 {
397         struct htb_class *p = cl->parent;
398         long m, mask = cl->prio_activity;
399 
400         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
401                 m = mask;
402                 while (m) {
403                         int prio = ffz(~m);
404                         m &= ~(1 << prio);
405 
406                         if (p->un.inner.clprio[prio].feed.rb_node)
 407                                 /* parent already has its feed in use, so
 408                                  * clear the bit in mask - parent is already ok
409                                  */
410                                 mask &= ~(1 << prio);
411 
412                         htb_add_to_id_tree(&p->un.inner.clprio[prio].feed, cl, prio);
413                 }
414                 p->prio_activity |= mask;
415                 cl = p;
416                 p = cl->parent;
417 
418         }
419         if (cl->cmode == HTB_CAN_SEND && mask)
420                 htb_add_class_to_row(q, cl, mask);
421 }
422 
423 /**
424  * htb_deactivate_prios - remove class from feed chain
425  *
426  * cl->cmode must represent old mode (before deactivation). It does
427  * nothing if cl->prio_activity == 0. Class is removed from all feed
428  * chains and rows.
429  */
430 static void htb_deactivate_prios(struct htb_sched *q, struct htb_class *cl)
431 {
432         struct htb_class *p = cl->parent;
433         long m, mask = cl->prio_activity;
434 
435         while (cl->cmode == HTB_MAY_BORROW && p && mask) {
436                 m = mask;
437                 mask = 0;
438                 while (m) {
439                         int prio = ffz(~m);
440                         m &= ~(1 << prio);
441 
442                         if (p->un.inner.clprio[prio].ptr == cl->node + prio) {
 443                                 /* we are removing a child which the parent
 444                                  * feed points to - forget the pointer but
 445                                  * remember the classid
446                                  */
447                                 p->un.inner.clprio[prio].last_ptr_id = cl->common.classid;
448                                 p->un.inner.clprio[prio].ptr = NULL;
449                         }
450 
451                         htb_safe_rb_erase(cl->node + prio,
452                                           &p->un.inner.clprio[prio].feed);
453 
454                         if (!p->un.inner.clprio[prio].feed.rb_node)
455                                 mask |= 1 << prio;
456                 }
457 
458                 p->prio_activity &= ~mask;
459                 cl = p;
460                 p = cl->parent;
461 
462         }
463         if (cl->cmode == HTB_CAN_SEND && mask)
464                 htb_remove_class_from_row(q, cl, mask);
465 }
466 
467 static inline s64 htb_lowater(const struct htb_class *cl)
468 {
469         if (htb_hysteresis)
470                 return cl->cmode != HTB_CANT_SEND ? -cl->cbuffer : 0;
471         else
472                 return 0;
473 }
474 static inline s64 htb_hiwater(const struct htb_class *cl)
475 {
476         if (htb_hysteresis)
477                 return cl->cmode == HTB_CAN_SEND ? -cl->buffer : 0;
478         else
479                 return 0;
480 }
481 
482 
483 /**
484  * htb_class_mode - computes and returns current class mode
485  *
 486  * It computes cl's mode at time cl->t_c+diff and returns it. If the mode
 487  * is not HTB_CAN_SEND then *diff is set to the time difference from now
 488  * to the time when cl will change its state.
 489  * It is also worth noting that the class mode doesn't change simply
 490  * at cl->{c,}tokens == 0; rather, there can be hysteresis over the
 491  * 0 .. -cl->{c,}buffer range. It is meant to limit the number of
 492  * mode transitions per time unit. The speed gain is about 1/6.
493  */
494 static inline enum htb_cmode
495 htb_class_mode(struct htb_class *cl, s64 *diff)
496 {
497         s64 toks;
498 
499         if ((toks = (cl->ctokens + *diff)) < htb_lowater(cl)) {
500                 *diff = -toks;
501                 return HTB_CANT_SEND;
502         }
503 
504         if ((toks = (cl->tokens + *diff)) >= htb_hiwater(cl))
505                 return HTB_CAN_SEND;
506 
507         *diff = -toks;
508         return HTB_MAY_BORROW;
509 }
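
/* Editor's worked example (illustrative): with htb_hysteresis enabled, a
 * class already in HTB_CAN_SEND keeps that mode until tokens would sink
 * below -cl->buffer (htb_hiwater), while a class already in HTB_CANT_SEND
 * must climb back to ctokens >= 0 before unblocking (htb_lowater).  These
 * asymmetric thresholds form the 0 .. -cl->{c,}buffer dead band described
 * above, trading a little accuracy for fewer mode transitions.
 */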
510 
511 /**
 512  * htb_change_class_mode - changes class's mode
 513  *
 514  * This should be the only way to change a class's mode under normal
 515  * circumstances. The routine will update the feed list linkage, change the
 516  * mode and add the class to the wait event queue if appropriate. The new mode
 517  * should be different from the old one and cl->pq_key has to be valid if
 518  * changing to a mode other than HTB_CAN_SEND (see htb_add_to_wait_tree).
519  */
520 static void
521 htb_change_class_mode(struct htb_sched *q, struct htb_class *cl, s64 *diff)
522 {
523         enum htb_cmode new_mode = htb_class_mode(cl, diff);
524 
525         if (new_mode == cl->cmode)
526                 return;
527 
528         if (cl->prio_activity) {        /* not necessary: speed optimization */
529                 if (cl->cmode != HTB_CANT_SEND)
530                         htb_deactivate_prios(q, cl);
531                 cl->cmode = new_mode;
532                 if (new_mode != HTB_CANT_SEND)
533                         htb_activate_prios(q, cl);
534         } else
535                 cl->cmode = new_mode;
536 }
537 
538 /**
539  * htb_activate - inserts leaf cl into appropriate active feeds
540  *
 541  * The routine learns the (new) priority of the leaf and activates the feed
 542  * chain for that prio. It can safely be called on an already active leaf.
 543  * It also adds the leaf to the drop list.
544  */
545 static inline void htb_activate(struct htb_sched *q, struct htb_class *cl)
546 {
547         WARN_ON(cl->level || !cl->un.leaf.q || !cl->un.leaf.q->q.qlen);
548 
549         if (!cl->prio_activity) {
550                 cl->prio_activity = 1 << cl->prio;
551                 htb_activate_prios(q, cl);
552                 list_add_tail(&cl->un.leaf.drop_list,
553                               q->drops + cl->prio);
554         }
555 }
556 
557 /**
558  * htb_deactivate - remove leaf cl from active feeds
559  *
 560  * Make sure that the leaf is active. In other words, it can't be called
 561  * with a non-active leaf. It also removes the class from the drop list.
562  */
563 static inline void htb_deactivate(struct htb_sched *q, struct htb_class *cl)
564 {
565         WARN_ON(!cl->prio_activity);
566 
567         htb_deactivate_prios(q, cl);
568         cl->prio_activity = 0;
569         list_del_init(&cl->un.leaf.drop_list);
570 }
571 
572 static int htb_enqueue(struct sk_buff *skb, struct Qdisc *sch)
573 {
574         int uninitialized_var(ret);
575         struct htb_sched *q = qdisc_priv(sch);
576         struct htb_class *cl = htb_classify(skb, sch, &ret);
577 
578         if (cl == HTB_DIRECT) {
579                 /* enqueue to helper queue */
580                 if (q->direct_queue.qlen < q->direct_qlen) {
581                         __skb_queue_tail(&q->direct_queue, skb);
582                         q->direct_pkts++;
583                 } else {
584                         return qdisc_drop(skb, sch);
585                 }
586 #ifdef CONFIG_NET_CLS_ACT
587         } else if (!cl) {
588                 if (ret & __NET_XMIT_BYPASS)
589                         qdisc_qstats_drop(sch);
590                 kfree_skb(skb);
591                 return ret;
592 #endif
593         } else if ((ret = qdisc_enqueue(skb, cl->un.leaf.q)) != NET_XMIT_SUCCESS) {
594                 if (net_xmit_drop_count(ret)) {
595                         qdisc_qstats_drop(sch);
596                         cl->qstats.drops++;
597                 }
598                 return ret;
599         } else {
600                 htb_activate(q, cl);
601         }
602 
603         qdisc_qstats_backlog_inc(sch, skb);
604         sch->q.qlen++;
605         return NET_XMIT_SUCCESS;
606 }
607 
608 static inline void htb_accnt_tokens(struct htb_class *cl, int bytes, s64 diff)
609 {
610         s64 toks = diff + cl->tokens;
611 
612         if (toks > cl->buffer)
613                 toks = cl->buffer;
614         toks -= (s64) psched_l2t_ns(&cl->rate, bytes);
615         if (toks <= -cl->mbuffer)
616                 toks = 1 - cl->mbuffer;
617 
618         cl->tokens = toks;
619 }
620 
621 static inline void htb_accnt_ctokens(struct htb_class *cl, int bytes, s64 diff)
622 {
623         s64 toks = diff + cl->ctokens;
624 
625         if (toks > cl->cbuffer)
626                 toks = cl->cbuffer;
627         toks -= (s64) psched_l2t_ns(&cl->ceil, bytes);
628         if (toks <= -cl->mbuffer)
629                 toks = 1 - cl->mbuffer;
630 
631         cl->ctokens = toks;
632 }
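
/* Editor's worked example (illustrative): at a rate of 1 Mbit/s,
 * psched_l2t_ns() prices a 1500-byte packet at 1500 * 8 / 10^6 s = 12 ms,
 * so one such dequeue drains 12,000,000 ns worth of tokens; the running
 * balance is then clamped between cl->buffer above and about -cl->mbuffer
 * below, as the two helpers above show.
 */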
633 
634 /**
635  * htb_charge_class - charges amount "bytes" to leaf and ancestors
636  *
 637  * The routine assumes that a packet "bytes" long was dequeued from leaf cl,
 638  * borrowing from "level". It accounts bytes to the ceil leaky bucket for the
 639  * leaf and all ancestors and to the rate bucket for ancestors at levels
 640  * "level" and higher. It also handles a possible change of mode resulting
 641  * from the update. Note that the mode can also increase here (MAY_BORROW to
 642  * CAN_SEND) because we can use a more precise clock than the event queue.
 643  * In such a case we remove the class from the event queue first.
644  */
645 static void htb_charge_class(struct htb_sched *q, struct htb_class *cl,
646                              int level, struct sk_buff *skb)
647 {
648         int bytes = qdisc_pkt_len(skb);
649         enum htb_cmode old_mode;
650         s64 diff;
651 
652         while (cl) {
653                 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
654                 if (cl->level >= level) {
655                         if (cl->level == level)
656                                 cl->xstats.lends++;
657                         htb_accnt_tokens(cl, bytes, diff);
658                 } else {
659                         cl->xstats.borrows++;
660                         cl->tokens += diff;     /* we moved t_c; update tokens */
661                 }
662                 htb_accnt_ctokens(cl, bytes, diff);
663                 cl->t_c = q->now;
664 
665                 old_mode = cl->cmode;
666                 diff = 0;
667                 htb_change_class_mode(q, cl, &diff);
668                 if (old_mode != cl->cmode) {
669                         if (old_mode != HTB_CAN_SEND)
670                                 htb_safe_rb_erase(&cl->pq_node, &q->hlevel[cl->level].wait_pq);
671                         if (cl->cmode != HTB_CAN_SEND)
672                                 htb_add_to_wait_tree(q, cl, diff);
673                 }
674 
675                 /* update basic stats except for leaves which are already updated */
676                 if (cl->level)
677                         bstats_update(&cl->bstats, skb);
678 
679                 cl = cl->parent;
680         }
681 }
682 
683 /**
684  * htb_do_events - make mode changes to classes at the level
685  *
 686  * Scans the event queue for pending events and applies them. Returns the
 687  * time of the next pending event (0 if none in pq, q->now for too many).
 688  * Note: only events with cl->pq_key <= q->now are applied.
689  */
690 static s64 htb_do_events(struct htb_sched *q, const int level,
691                          unsigned long start)
692 {
693         /* don't run for longer than 2 jiffies; 2 is used instead of
694          * 1 to simplify things when jiffy is going to be incremented
695          * too soon
696          */
697         unsigned long stop_at = start + 2;
698         struct rb_root *wait_pq = &q->hlevel[level].wait_pq;
699 
700         while (time_before(jiffies, stop_at)) {
701                 struct htb_class *cl;
702                 s64 diff;
703                 struct rb_node *p = rb_first(wait_pq);
704 
705                 if (!p)
706                         return 0;
707 
708                 cl = rb_entry(p, struct htb_class, pq_node);
709                 if (cl->pq_key > q->now)
710                         return cl->pq_key;
711 
712                 htb_safe_rb_erase(p, wait_pq);
713                 diff = min_t(s64, q->now - cl->t_c, cl->mbuffer);
714                 htb_change_class_mode(q, cl, &diff);
715                 if (cl->cmode != HTB_CAN_SEND)
716                         htb_add_to_wait_tree(q, cl, diff);
717         }
718 
719         /* too much load - let's continue after a break for scheduling */
720         if (!(q->warned & HTB_WARN_TOOMANYEVENTS)) {
721                 pr_warn("htb: too many events!\n");
722                 q->warned |= HTB_WARN_TOOMANYEVENTS;
723         }
724 
725         return q->now;
726 }
727 
 728 /* Returns class->node+prio from the id-tree with the smallest classid >= id,
 729  * or NULL if no such node exists.
730  */
731 static struct rb_node *htb_id_find_next_upper(int prio, struct rb_node *n,
732                                               u32 id)
733 {
734         struct rb_node *r = NULL;
735         while (n) {
736                 struct htb_class *cl =
737                     rb_entry(n, struct htb_class, node[prio]);
738 
739                 if (id > cl->common.classid) {
740                         n = n->rb_right;
741                 } else if (id < cl->common.classid) {
742                         r = n;
743                         n = n->rb_left;
744                 } else {
745                         return n;
746                 }
747         }
748         return r;
749 }
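
/* Editor's example (illustrative): with classids {2, 5, 9} in the id-tree,
 * htb_id_find_next_upper(prio, root, 4) yields the node for classid 5,
 * id 5 yields classid 5 itself, and id 10 yields NULL.
 */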
750 
751 /**
752  * htb_lookup_leaf - returns next leaf class in DRR order
753  *
 754  * Find the leaf the current feed pointers point to.
755  */
756 static struct htb_class *htb_lookup_leaf(struct htb_prio *hprio, const int prio)
757 {
758         int i;
759         struct {
760                 struct rb_node *root;
761                 struct rb_node **pptr;
762                 u32 *pid;
763         } stk[TC_HTB_MAXDEPTH], *sp = stk;
764 
765         BUG_ON(!hprio->row.rb_node);
766         sp->root = hprio->row.rb_node;
767         sp->pptr = &hprio->ptr;
768         sp->pid = &hprio->last_ptr_id;
769 
770         for (i = 0; i < 65535; i++) {
771                 if (!*sp->pptr && *sp->pid) {
772                         /* ptr was invalidated but id is valid - try to recover
773                          * the original or next ptr
774                          */
775                         *sp->pptr =
776                             htb_id_find_next_upper(prio, sp->root, *sp->pid);
777                 }
 778                 *sp->pid = 0;   /* ptr is valid now, so drop this hint as it
779                                  * can become out of date quickly
780                                  */
781                 if (!*sp->pptr) {       /* we are at right end; rewind & go up */
782                         *sp->pptr = sp->root;
783                         while ((*sp->pptr)->rb_left)
784                                 *sp->pptr = (*sp->pptr)->rb_left;
785                         if (sp > stk) {
786                                 sp--;
787                                 if (!*sp->pptr) {
788                                         WARN_ON(1);
789                                         return NULL;
790                                 }
791                                 htb_next_rb_node(sp->pptr);
792                         }
793                 } else {
794                         struct htb_class *cl;
795                         struct htb_prio *clp;
796 
797                         cl = rb_entry(*sp->pptr, struct htb_class, node[prio]);
798                         if (!cl->level)
799                                 return cl;
800                         clp = &cl->un.inner.clprio[prio];
801                         (++sp)->root = clp->feed.rb_node;
802                         sp->pptr = &clp->ptr;
803                         sp->pid = &clp->last_ptr_id;
804                 }
805         }
806         WARN_ON(1);
807         return NULL;
808 }
809 
 810 /* dequeues packet at given priority and level; call only if
 811  * you are sure that there is an active class at prio/level
812  */
813 static struct sk_buff *htb_dequeue_tree(struct htb_sched *q, const int prio,
814                                         const int level)
815 {
816         struct sk_buff *skb = NULL;
817         struct htb_class *cl, *start;
818         struct htb_level *hlevel = &q->hlevel[level];
819         struct htb_prio *hprio = &hlevel->hprio[prio];
820 
821         /* look initial class up in the row */
822         start = cl = htb_lookup_leaf(hprio, prio);
823 
824         do {
825 next:
826                 if (unlikely(!cl))
827                         return NULL;
828 
 829                 /* class can be empty - it is unlikely but can be true if the
 830                  * leaf qdisc drops packets in its enqueue routine or if someone
 831                  * used the graft operation on the leaf since the last dequeue;
 832                  * simply deactivate and skip such a class
 833                  */
834                 if (unlikely(cl->un.leaf.q->q.qlen == 0)) {
835                         struct htb_class *next;
836                         htb_deactivate(q, cl);
837 
838                         /* row/level might become empty */
839                         if ((q->row_mask[level] & (1 << prio)) == 0)
840                                 return NULL;
841 
842                         next = htb_lookup_leaf(hprio, prio);
843 
844                         if (cl == start)        /* fix start if we just deleted it */
845                                 start = next;
846                         cl = next;
847                         goto next;
848                 }
849 
850                 skb = cl->un.leaf.q->dequeue(cl->un.leaf.q);
851                 if (likely(skb != NULL))
852                         break;
853 
854                 qdisc_warn_nonwc("htb", cl->un.leaf.q);
855                 htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr:
856                                          &q->hlevel[0].hprio[prio].ptr);
857                 cl = htb_lookup_leaf(hprio, prio);
858 
859         } while (cl != start);
860 
861         if (likely(skb != NULL)) {
862                 bstats_update(&cl->bstats, skb);
863                 cl->un.leaf.deficit[level] -= qdisc_pkt_len(skb);
864                 if (cl->un.leaf.deficit[level] < 0) {
865                         cl->un.leaf.deficit[level] += cl->quantum;
866                         htb_next_rb_node(level ? &cl->parent->un.inner.clprio[prio].ptr :
867                                                  &q->hlevel[0].hprio[prio].ptr);
868                 }
 869                 /* this used to be after charge_class but this arrangement
 870                  * gives us slightly better performance
 871                  */
872                 if (!cl->un.leaf.q->q.qlen)
873                         htb_deactivate(q, cl);
874                 htb_charge_class(q, cl, level, skb);
875         }
876         return skb;
877 }
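
/* Editor's worked example (illustrative): a leaf with quantum 1514 and a
 * zeroed deficit dequeues a 1000-byte packet, driving deficit[level] to
 * -1000; since that is negative, the quantum is added back (deficit becomes
 * 514) and the round-robin ptr advances, so the next class gets its turn.
 */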
878 
879 static struct sk_buff *htb_dequeue(struct Qdisc *sch)
880 {
881         struct sk_buff *skb;
882         struct htb_sched *q = qdisc_priv(sch);
883         int level;
884         s64 next_event;
885         unsigned long start_at;
886 
887         /* try to dequeue direct packets as high prio (!) to minimize cpu work */
888         skb = __skb_dequeue(&q->direct_queue);
889         if (skb != NULL) {
890 ok:
891                 qdisc_bstats_update(sch, skb);
892                 qdisc_unthrottled(sch);
893                 qdisc_qstats_backlog_dec(sch, skb);
894                 sch->q.qlen--;
895                 return skb;
896         }
897 
898         if (!sch->q.qlen)
899                 goto fin;
900         q->now = ktime_get_ns();
901         start_at = jiffies;
902 
903         next_event = q->now + 5LLU * NSEC_PER_SEC;
904 
905         for (level = 0; level < TC_HTB_MAXDEPTH; level++) {
906                 /* common case optimization - skip event handler quickly */
907                 int m;
908                 s64 event = q->near_ev_cache[level];
909 
910                 if (q->now >= event) {
911                         event = htb_do_events(q, level, start_at);
912                         if (!event)
913                                 event = q->now + NSEC_PER_SEC;
914                         q->near_ev_cache[level] = event;
915                 }
916 
917                 if (next_event > event)
918                         next_event = event;
919 
920                 m = ~q->row_mask[level];
921                 while (m != (int)(-1)) {
922                         int prio = ffz(m);
923 
924                         m |= 1 << prio;
925                         skb = htb_dequeue_tree(q, prio, level);
926                         if (likely(skb != NULL))
927                                 goto ok;
928                 }
929         }
930         qdisc_qstats_overlimit(sch);
931         if (likely(next_event > q->now))
932                 qdisc_watchdog_schedule_ns(&q->watchdog, next_event, true);
933         else
934                 schedule_work(&q->work);
935 fin:
936         return skb;
937 }
938 
 939 /* try to drop from each class (by prio) until one succeeds */
940 static unsigned int htb_drop(struct Qdisc *sch)
941 {
942         struct htb_sched *q = qdisc_priv(sch);
943         int prio;
944 
945         for (prio = TC_HTB_NUMPRIO - 1; prio >= 0; prio--) {
946                 struct list_head *p;
947                 list_for_each(p, q->drops + prio) {
948                         struct htb_class *cl = list_entry(p, struct htb_class,
949                                                           un.leaf.drop_list);
950                         unsigned int len;
951                         if (cl->un.leaf.q->ops->drop &&
952                             (len = cl->un.leaf.q->ops->drop(cl->un.leaf.q))) {
953                                 sch->qstats.backlog -= len;
954                                 sch->q.qlen--;
955                                 if (!cl->un.leaf.q->q.qlen)
956                                         htb_deactivate(q, cl);
957                                 return len;
958                         }
959                 }
960         }
961         return 0;
962 }
963 
964 /* reset all classes */
 965 /* always called under BH & queue lock */
966 static void htb_reset(struct Qdisc *sch)
967 {
968         struct htb_sched *q = qdisc_priv(sch);
969         struct htb_class *cl;
970         unsigned int i;
971 
972         for (i = 0; i < q->clhash.hashsize; i++) {
973                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
974                         if (cl->level)
975                                 memset(&cl->un.inner, 0, sizeof(cl->un.inner));
976                         else {
977                                 if (cl->un.leaf.q)
978                                         qdisc_reset(cl->un.leaf.q);
979                                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
980                         }
981                         cl->prio_activity = 0;
982                         cl->cmode = HTB_CAN_SEND;
983                 }
984         }
985         qdisc_watchdog_cancel(&q->watchdog);
986         __skb_queue_purge(&q->direct_queue);
987         sch->q.qlen = 0;
988         sch->qstats.backlog = 0;
989         memset(q->hlevel, 0, sizeof(q->hlevel));
990         memset(q->row_mask, 0, sizeof(q->row_mask));
991         for (i = 0; i < TC_HTB_NUMPRIO; i++)
992                 INIT_LIST_HEAD(q->drops + i);
993 }
994 
995 static const struct nla_policy htb_policy[TCA_HTB_MAX + 1] = {
996         [TCA_HTB_PARMS] = { .len = sizeof(struct tc_htb_opt) },
997         [TCA_HTB_INIT]  = { .len = sizeof(struct tc_htb_glob) },
998         [TCA_HTB_CTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
999         [TCA_HTB_RTAB]  = { .type = NLA_BINARY, .len = TC_RTAB_SIZE },
1000         [TCA_HTB_DIRECT_QLEN] = { .type = NLA_U32 },
1001         [TCA_HTB_RATE64] = { .type = NLA_U64 },
1002         [TCA_HTB_CEIL64] = { .type = NLA_U64 },
1003 };
1004 
1005 static void htb_work_func(struct work_struct *work)
1006 {
1007         struct htb_sched *q = container_of(work, struct htb_sched, work);
1008         struct Qdisc *sch = q->watchdog.qdisc;
1009 
1010         rcu_read_lock();
1011         __netif_schedule(qdisc_root(sch));
1012         rcu_read_unlock();
1013 }
1014 
1015 static int htb_init(struct Qdisc *sch, struct nlattr *opt)
1016 {
1017         struct htb_sched *q = qdisc_priv(sch);
1018         struct nlattr *tb[TCA_HTB_MAX + 1];
1019         struct tc_htb_glob *gopt;
1020         int err;
1021         int i;
1022 
1023         if (!opt)
1024                 return -EINVAL;
1025 
1026         err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
1027         if (err < 0)
1028                 return err;
1029 
1030         if (!tb[TCA_HTB_INIT])
1031                 return -EINVAL;
1032 
1033         gopt = nla_data(tb[TCA_HTB_INIT]);
1034         if (gopt->version != HTB_VER >> 16)
1035                 return -EINVAL;
1036 
1037         err = qdisc_class_hash_init(&q->clhash);
1038         if (err < 0)
1039                 return err;
1040         for (i = 0; i < TC_HTB_NUMPRIO; i++)
1041                 INIT_LIST_HEAD(q->drops + i);
1042 
1043         qdisc_watchdog_init(&q->watchdog, sch);
1044         INIT_WORK(&q->work, htb_work_func);
1045         __skb_queue_head_init(&q->direct_queue);
1046 
1047         if (tb[TCA_HTB_DIRECT_QLEN])
1048                 q->direct_qlen = nla_get_u32(tb[TCA_HTB_DIRECT_QLEN]);
1049         else
1050                 q->direct_qlen = qdisc_dev(sch)->tx_queue_len;
1051 
1052         if ((q->rate2quantum = gopt->rate2quantum) < 1)
1053                 q->rate2quantum = 1;
1054         q->defcls = gopt->defcls;
1055 
1056         return 0;
1057 }
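
/* Editor's example (illustrative): a setup that exercises this init path and
 * the defcls fallback in htb_classify():
 *
 *	tc qdisc add dev eth0 root handle 1: htb default 20 direct_qlen 100
 */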
1058 
1059 static int htb_dump(struct Qdisc *sch, struct sk_buff *skb)
1060 {
1061         struct htb_sched *q = qdisc_priv(sch);
1062         struct nlattr *nest;
1063         struct tc_htb_glob gopt;
1064 
1065         /* It's safe to not acquire the qdisc lock: as we hold RTNL,
1066          * no change can happen to the qdisc parameters.
1067          */
1068 
1069         gopt.direct_pkts = q->direct_pkts;
1070         gopt.version = HTB_VER;
1071         gopt.rate2quantum = q->rate2quantum;
1072         gopt.defcls = q->defcls;
1073         gopt.debug = 0;
1074 
1075         nest = nla_nest_start(skb, TCA_OPTIONS);
1076         if (nest == NULL)
1077                 goto nla_put_failure;
1078         if (nla_put(skb, TCA_HTB_INIT, sizeof(gopt), &gopt) ||
1079             nla_put_u32(skb, TCA_HTB_DIRECT_QLEN, q->direct_qlen))
1080                 goto nla_put_failure;
1081 
1082         return nla_nest_end(skb, nest);
1083 
1084 nla_put_failure:
1085         nla_nest_cancel(skb, nest);
1086         return -1;
1087 }
1088 
1089 static int htb_dump_class(struct Qdisc *sch, unsigned long arg,
1090                           struct sk_buff *skb, struct tcmsg *tcm)
1091 {
1092         struct htb_class *cl = (struct htb_class *)arg;
1093         struct nlattr *nest;
1094         struct tc_htb_opt opt;
1095 
1096         /* It's safe to not acquire the qdisc lock: as we hold RTNL,
1097          * no change can happen to the class parameters.
1098          */
1099         tcm->tcm_parent = cl->parent ? cl->parent->common.classid : TC_H_ROOT;
1100         tcm->tcm_handle = cl->common.classid;
1101         if (!cl->level && cl->un.leaf.q)
1102                 tcm->tcm_info = cl->un.leaf.q->handle;
1103 
1104         nest = nla_nest_start(skb, TCA_OPTIONS);
1105         if (nest == NULL)
1106                 goto nla_put_failure;
1107 
1108         memset(&opt, 0, sizeof(opt));
1109 
1110         psched_ratecfg_getrate(&opt.rate, &cl->rate);
1111         opt.buffer = PSCHED_NS2TICKS(cl->buffer);
1112         psched_ratecfg_getrate(&opt.ceil, &cl->ceil);
1113         opt.cbuffer = PSCHED_NS2TICKS(cl->cbuffer);
1114         opt.quantum = cl->quantum;
1115         opt.prio = cl->prio;
1116         opt.level = cl->level;
1117         if (nla_put(skb, TCA_HTB_PARMS, sizeof(opt), &opt))
1118                 goto nla_put_failure;
1119         if ((cl->rate.rate_bytes_ps >= (1ULL << 32)) &&
1120             nla_put_u64_64bit(skb, TCA_HTB_RATE64, cl->rate.rate_bytes_ps,
1121                               TCA_HTB_PAD))
1122                 goto nla_put_failure;
1123         if ((cl->ceil.rate_bytes_ps >= (1ULL << 32)) &&
1124             nla_put_u64_64bit(skb, TCA_HTB_CEIL64, cl->ceil.rate_bytes_ps,
1125                               TCA_HTB_PAD))
1126                 goto nla_put_failure;
1127 
1128         return nla_nest_end(skb, nest);
1129 
1130 nla_put_failure:
1131         nla_nest_cancel(skb, nest);
1132         return -1;
1133 }
1134 
1135 static int
1136 htb_dump_class_stats(struct Qdisc *sch, unsigned long arg, struct gnet_dump *d)
1137 {
1138         struct htb_class *cl = (struct htb_class *)arg;
1139         __u32 qlen = 0;
1140 
1141         if (!cl->level && cl->un.leaf.q)
1142                 qlen = cl->un.leaf.q->q.qlen;
1143         cl->xstats.tokens = clamp_t(s64, PSCHED_NS2TICKS(cl->tokens),
1144                                     INT_MIN, INT_MAX);
1145         cl->xstats.ctokens = clamp_t(s64, PSCHED_NS2TICKS(cl->ctokens),
1146                                      INT_MIN, INT_MAX);
1147 
1148         if (gnet_stats_copy_basic(d, NULL, &cl->bstats) < 0 ||
1149             gnet_stats_copy_rate_est(d, NULL, &cl->rate_est) < 0 ||
1150             gnet_stats_copy_queue(d, NULL, &cl->qstats, qlen) < 0)
1151                 return -1;
1152 
1153         return gnet_stats_copy_app(d, &cl->xstats, sizeof(cl->xstats));
1154 }
1155 
1156 static int htb_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
1157                      struct Qdisc **old)
1158 {
1159         struct htb_class *cl = (struct htb_class *)arg;
1160 
1161         if (cl->level)
1162                 return -EINVAL;
1163         if (new == NULL &&
1164             (new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1165                                      cl->common.classid)) == NULL)
1166                 return -ENOBUFS;
1167 
1168         *old = qdisc_replace(sch, new, &cl->un.leaf.q);
1169         return 0;
1170 }
1171 
1172 static struct Qdisc *htb_leaf(struct Qdisc *sch, unsigned long arg)
1173 {
1174         struct htb_class *cl = (struct htb_class *)arg;
1175         return !cl->level ? cl->un.leaf.q : NULL;
1176 }
1177 
1178 static void htb_qlen_notify(struct Qdisc *sch, unsigned long arg)
1179 {
1180         struct htb_class *cl = (struct htb_class *)arg;
1181 
1182         if (cl->un.leaf.q->q.qlen == 0)
1183                 htb_deactivate(qdisc_priv(sch), cl);
1184 }
1185 
1186 static unsigned long htb_get(struct Qdisc *sch, u32 classid)
1187 {
1188         struct htb_class *cl = htb_find(classid, sch);
1189         if (cl)
1190                 cl->refcnt++;
1191         return (unsigned long)cl;
1192 }
1193 
1194 static inline int htb_parent_last_child(struct htb_class *cl)
1195 {
1196         if (!cl->parent)
1197                 /* the root class */
1198                 return 0;
1199         if (cl->parent->children > 1)
1200                 /* not the last child */
1201                 return 0;
1202         return 1;
1203 }
1204 
1205 static void htb_parent_to_leaf(struct htb_sched *q, struct htb_class *cl,
1206                                struct Qdisc *new_q)
1207 {
1208         struct htb_class *parent = cl->parent;
1209 
1210         WARN_ON(cl->level || !cl->un.leaf.q || cl->prio_activity);
1211 
1212         if (parent->cmode != HTB_CAN_SEND)
1213                 htb_safe_rb_erase(&parent->pq_node,
1214                                   &q->hlevel[parent->level].wait_pq);
1215 
1216         parent->level = 0;
1217         memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1218         INIT_LIST_HEAD(&parent->un.leaf.drop_list);
1219         parent->un.leaf.q = new_q ? new_q : &noop_qdisc;
1220         parent->tokens = parent->buffer;
1221         parent->ctokens = parent->cbuffer;
1222         parent->t_c = ktime_get_ns();
1223         parent->cmode = HTB_CAN_SEND;
1224 }
1225 
1226 static void htb_destroy_class(struct Qdisc *sch, struct htb_class *cl)
1227 {
1228         if (!cl->level) {
1229                 WARN_ON(!cl->un.leaf.q);
1230                 qdisc_destroy(cl->un.leaf.q);
1231         }
1232         gen_kill_estimator(&cl->bstats, &cl->rate_est);
1233         tcf_destroy_chain(&cl->filter_list);
1234         kfree(cl);
1235 }
1236 
1237 static void htb_destroy(struct Qdisc *sch)
1238 {
1239         struct htb_sched *q = qdisc_priv(sch);
1240         struct hlist_node *next;
1241         struct htb_class *cl;
1242         unsigned int i;
1243 
1244         cancel_work_sync(&q->work);
1245         qdisc_watchdog_cancel(&q->watchdog);
1246         /* This line used to be after the htb_destroy_class call below
1247          * and surprisingly it worked in 2.4. But it must precede it
1248          * because filters need their target class alive to be able to call
1249          * unbind_filter on it (without an Oops).
1250          */
1251         tcf_destroy_chain(&q->filter_list);
1252 
1253         for (i = 0; i < q->clhash.hashsize; i++) {
1254                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode)
1255                         tcf_destroy_chain(&cl->filter_list);
1256         }
1257         for (i = 0; i < q->clhash.hashsize; i++) {
1258                 hlist_for_each_entry_safe(cl, next, &q->clhash.hash[i],
1259                                           common.hnode)
1260                         htb_destroy_class(sch, cl);
1261         }
1262         qdisc_class_hash_destroy(&q->clhash);
1263         __skb_queue_purge(&q->direct_queue);
1264 }
1265 
1266 static int htb_delete(struct Qdisc *sch, unsigned long arg)
1267 {
1268         struct htb_sched *q = qdisc_priv(sch);
1269         struct htb_class *cl = (struct htb_class *)arg;
1270         struct Qdisc *new_q = NULL;
1271         int last_child = 0;
1272 
1273         /* TODO: why don't we allow deleting a subtree? References? Does the
1274          * tc subsystem guarantee that in htb_destroy it holds no class
1275          * refs, so that we could remove children safely there?
1276          */
1277         if (cl->children || cl->filter_cnt)
1278                 return -EBUSY;
1279 
1280         if (!cl->level && htb_parent_last_child(cl)) {
1281                 new_q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
1282                                           cl->parent->common.classid);
1283                 last_child = 1;
1284         }
1285 
1286         sch_tree_lock(sch);
1287 
1288         if (!cl->level) {
1289                 unsigned int qlen = cl->un.leaf.q->q.qlen;
1290                 unsigned int backlog = cl->un.leaf.q->qstats.backlog;
1291 
1292                 qdisc_reset(cl->un.leaf.q);
1293                 qdisc_tree_reduce_backlog(cl->un.leaf.q, qlen, backlog);
1294         }
1295 
1296         /* delete from hash and active; remainder in destroy_class */
1297         qdisc_class_hash_remove(&q->clhash, &cl->common);
1298         if (cl->parent)
1299                 cl->parent->children--;
1300 
1301         if (cl->prio_activity)
1302                 htb_deactivate(q, cl);
1303 
1304         if (cl->cmode != HTB_CAN_SEND)
1305                 htb_safe_rb_erase(&cl->pq_node,
1306                                   &q->hlevel[cl->level].wait_pq);
1307 
1308         if (last_child)
1309                 htb_parent_to_leaf(q, cl, new_q);
1310 
1311         BUG_ON(--cl->refcnt == 0);
1312         /*
1313          * This shouldn't happen: we "hold" one cops->get() when called
1314          * from tc_ctl_tclass; the destroy method is done from cops->put().
1315          */
1316 
1317         sch_tree_unlock(sch);
1318         return 0;
1319 }
1320 
1321 static void htb_put(struct Qdisc *sch, unsigned long arg)
1322 {
1323         struct htb_class *cl = (struct htb_class *)arg;
1324 
1325         if (--cl->refcnt == 0)
1326                 htb_destroy_class(sch, cl);
1327 }
1328 
1329 static int htb_change_class(struct Qdisc *sch, u32 classid,
1330                             u32 parentid, struct nlattr **tca,
1331                             unsigned long *arg)
1332 {
1333         int err = -EINVAL;
1334         struct htb_sched *q = qdisc_priv(sch);
1335         struct htb_class *cl = (struct htb_class *)*arg, *parent;
1336         struct nlattr *opt = tca[TCA_OPTIONS];
1337         struct nlattr *tb[TCA_HTB_MAX + 1];
1338         struct tc_htb_opt *hopt;
1339         u64 rate64, ceil64;
1340 
1341         /* extract all subattrs from opt attr */
1342         if (!opt)
1343                 goto failure;
1344 
1345         err = nla_parse_nested(tb, TCA_HTB_MAX, opt, htb_policy);
1346         if (err < 0)
1347                 goto failure;
1348 
1349         err = -EINVAL;
1350         if (tb[TCA_HTB_PARMS] == NULL)
1351                 goto failure;
1352 
1353         parent = parentid == TC_H_ROOT ? NULL : htb_find(parentid, sch);
1354 
1355         hopt = nla_data(tb[TCA_HTB_PARMS]);
1356         if (!hopt->rate.rate || !hopt->ceil.rate)
1357                 goto failure;
1358 
1359         /* Keep backward compatibility with rate_table based iproute2 tc */
1360         if (hopt->rate.linklayer == TC_LINKLAYER_UNAWARE)
1361                 qdisc_put_rtab(qdisc_get_rtab(&hopt->rate, tb[TCA_HTB_RTAB]));
1362 
1363         if (hopt->ceil.linklayer == TC_LINKLAYER_UNAWARE)
1364                 qdisc_put_rtab(qdisc_get_rtab(&hopt->ceil, tb[TCA_HTB_CTAB]));
1365 
1366         if (!cl) {              /* new class */
1367                 struct Qdisc *new_q;
1368                 int prio;
1369                 struct {
1370                         struct nlattr           nla;
1371                         struct gnet_estimator   opt;
1372                 } est = {
1373                         .nla = {
1374                                 .nla_len        = nla_attr_size(sizeof(est.opt)),
1375                                 .nla_type       = TCA_RATE,
1376                         },
1377                         .opt = {
1378                                 /* 4s interval, 16s averaging constant */
1379                                 .interval       = 2,
1380                                 .ewma_log       = 2,
1381                         },
1382                 };
1383 
1384                 /* check for valid classid */
1385                 if (!classid || TC_H_MAJ(classid ^ sch->handle) ||
1386                     htb_find(classid, sch))
1387                         goto failure;
1388 
1389                 /* check maximal depth */
1390                 if (parent && parent->parent && parent->parent->level < 2) {
1391                         pr_err("htb: tree is too deep\n");
1392                         goto failure;
1393                 }
1394                 err = -ENOBUFS;
1395                 cl = kzalloc(sizeof(*cl), GFP_KERNEL);
1396                 if (!cl)
1397                         goto failure;
1398 
1399                 if (htb_rate_est || tca[TCA_RATE]) {
1400                         err = gen_new_estimator(&cl->bstats, NULL,
1401                                                 &cl->rate_est,
1402                                                 qdisc_root_sleeping_lock(sch),
1403                                                 tca[TCA_RATE] ? : &est.nla);
1404                         if (err) {
1405                                 kfree(cl);
1406                                 goto failure;
1407                         }
1408                 }
1409 
1410                 cl->refcnt = 1;
1411                 cl->children = 0;
1412                 INIT_LIST_HEAD(&cl->un.leaf.drop_list);
1413                 RB_CLEAR_NODE(&cl->pq_node);
1414 
1415                 for (prio = 0; prio < TC_HTB_NUMPRIO; prio++)
1416                         RB_CLEAR_NODE(&cl->node[prio]);
1417 
1418                 /* create leaf qdisc early because it uses kmalloc(GFP_KERNEL),
1419                  * which can't be used inside of sch_tree_lock
1420                  * -- thanks to Karlis Peisenieks
1421                  */
1422                 new_q = qdisc_create_dflt(sch->dev_queue,
1423                                           &pfifo_qdisc_ops, classid);
1424                 sch_tree_lock(sch);
1425                 if (parent && !parent->level) {
1426                         unsigned int qlen = parent->un.leaf.q->q.qlen;
1427                         unsigned int backlog = parent->un.leaf.q->qstats.backlog;
1428 
1429                         /* turn parent into inner node */
1430                         qdisc_reset(parent->un.leaf.q);
1431                         qdisc_tree_reduce_backlog(parent->un.leaf.q, qlen, backlog);
1432                         qdisc_destroy(parent->un.leaf.q);
1433                         if (parent->prio_activity)
1434                                 htb_deactivate(q, parent);
1435 
1436                         /* remove from the event queue due to the level change */
1437                         if (parent->cmode != HTB_CAN_SEND) {
1438                                 htb_safe_rb_erase(&parent->pq_node, &q->hlevel[0].wait_pq);
1439                                 parent->cmode = HTB_CAN_SEND;
1440                         }
1441                         parent->level = (parent->parent ? parent->parent->level
1442                                          : TC_HTB_MAXDEPTH) - 1;
1443                         memset(&parent->un.inner, 0, sizeof(parent->un.inner));
1444                 }
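                     /* Note on the conversion above: qdisc_reset() discards
                      * everything the old leaf qdisc had queued, and
                      * qdisc_tree_reduce_backlog() subtracts the saved
                      * qlen/backlog from every ancestor qdisc so their
                      * counters stay consistent with the dropped packets.
                      */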
1445                 /* the leaf (i.e. this new class) needs an elementary qdisc */
1446                 cl->un.leaf.q = new_q ? new_q : &noop_qdisc;
1447 
1448                 cl->common.classid = classid;
1449                 cl->parent = parent;
1450 
1451                 /* set class to be in HTB_CAN_SEND state */
1452                 cl->tokens = PSCHED_TICKS2NS(hopt->buffer);
1453                 cl->ctokens = PSCHED_TICKS2NS(hopt->cbuffer);
1454                 cl->mbuffer = 60ULL * NSEC_PER_SEC;     /* 1min */
1455                 cl->t_c = ktime_get_ns();
1456                 cl->cmode = HTB_CAN_SEND;
1457 
1458                 /* attach to the hash list and parent's family */
1459                 qdisc_class_hash_insert(&q->clhash, &cl->common);
1460                 if (parent)
1461                         parent->children++;
1462         } else {
1463                 if (tca[TCA_RATE]) {
1464                         spinlock_t *lock = qdisc_root_sleeping_lock(sch);
1465 
1466                         err = gen_replace_estimator(&cl->bstats, NULL,
1467                                                     &cl->rate_est,
1468                                                     lock,
1469                                                     tca[TCA_RATE]);
1470                         if (err)
1471                                 return err;
1472                 }
1473                 sch_tree_lock(sch);
1474         }
1475 
1476         rate64 = tb[TCA_HTB_RATE64] ? nla_get_u64(tb[TCA_HTB_RATE64]) : 0;
1477 
1478         ceil64 = tb[TCA_HTB_CEIL64] ? nla_get_u64(tb[TCA_HTB_CEIL64]) : 0;
1479 
1480         psched_ratecfg_precompute(&cl->rate, &hopt->rate, rate64);
1481         psched_ratecfg_precompute(&cl->ceil, &hopt->ceil, ceil64);
1482 
1483         /* there used to be a nasty bug here: we have to check that the
1484          * node really is a leaf before changing cl->un.leaf!
1485          */
1486         if (!cl->level) {
1487                 u64 quantum = cl->rate.rate_bytes_ps;
1488 
1489                 do_div(quantum, q->rate2quantum);
1490                 cl->quantum = min_t(u64, quantum, INT_MAX);
1491 
1492                 if (!hopt->quantum && cl->quantum < 1000) {
1493                         pr_warn("HTB: quantum of class %X is small. Consider r2q change.\n",
1494                                 cl->common.classid);
1495                         cl->quantum = 1000;
1496                 }
1497                 if (!hopt->quantum && cl->quantum > 200000) {
1498                         pr_warn("HTB: quantum of class %X is big. Consider r2q change.\n",
1499                                 cl->common.classid);
1500                         cl->quantum = 200000;
1501                 }
1502                 if (hopt->quantum)
1503                         cl->quantum = hopt->quantum;
1504                 if ((cl->prio = hopt->prio) >= TC_HTB_NUMPRIO)
1505                         cl->prio = TC_HTB_NUMPRIO - 1;
1506         }
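             /* Worked example (illustrative numbers): a 100 Mbit/s class has
              * rate_bytes_ps = 12500000; with the usual iproute2 default of
              * r2q = 10 the computed quantum is 1250000 bytes, above the
              * 200000 cap, so it is clamped with a warning unless the user
              * supplied an explicit quantum in hopt.
              */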
1507 
1508         cl->buffer = PSCHED_TICKS2NS(hopt->buffer);
1509         cl->cbuffer = PSCHED_TICKS2NS(hopt->cbuffer);
1510 
1511         sch_tree_unlock(sch);
1512 
1513         qdisc_class_hash_grow(sch, &q->clhash);
1514 
1515         *arg = (unsigned long)cl;
1516         return 0;
1517 
1518 failure:
1519         return err;
1520 }
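     /* For reference, a typical iproute2 sequence that exercises this
      * function (illustrative device and handle values):
      *
      *   tc qdisc add dev eth0 root handle 1: htb default 10
      *   tc class add dev eth0 parent 1: classid 1:10 htb rate 50mbit ceil 100mbit
      *
      * The second command lands here with classid 1:10 and rate/ceil carried
      * in TCA_HTB_PARMS; TCA_HTB_RATE64/TCA_HTB_CEIL64 carry values that do
      * not fit in 32 bits.
      */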
1521 
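     /* Returns the filter list to attach to: the qdisc-wide list when arg is
      * 0, otherwise the per-class list, so filters can hang off either level
      * of the hierarchy.
      */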
1522 static struct tcf_proto __rcu **htb_find_tcf(struct Qdisc *sch,
1523                                              unsigned long arg)
1524 {
1525         struct htb_sched *q = qdisc_priv(sch);
1526         struct htb_class *cl = (struct htb_class *)arg;
1527         struct tcf_proto __rcu **fl = cl ? &cl->filter_list : &q->filter_list;
1528 
1529         return fl;
1530 }
1531 
1532 static unsigned long htb_bind_filter(struct Qdisc *sch, unsigned long parent,
1533                                      u32 classid)
1534 {
1535         struct htb_class *cl = htb_find(classid, sch);
1536 
1537         /* The check "if (cl && !cl->level) return 0;" used to be here to
1538          * prevent attaching filters to leaves. But at least the tc_index
1539          * filter uses this call just to look up the class for other
1540          * reasons, so we have to allow it.
1541          * ----
1542          * 19.6.2002: as Werner explained, this is OK - binding a filter is
1543          * just another way to "lock" the class; unlike "get", this lock can
1544          * be broken by the class during destroy, IIUC.
1545          */
1546         if (cl)
1547                 cl->filter_cnt++;
1548         return (unsigned long)cl;
1549 }
1550 
1551 static void htb_unbind_filter(struct Qdisc *sch, unsigned long arg)
1552 {
1553         struct htb_class *cl = (struct htb_class *)arg;
1554 
1555         if (cl)
1556                 cl->filter_cnt--;
1557 }
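     /* The bind/unbind pair keeps filter_cnt balanced; a class with a
      * non-zero filter_cnt is pinned, and htb_delete() refuses to remove it.
      */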
1558 
1559 static void htb_walk(struct Qdisc *sch, struct qdisc_walker *arg)
1560 {
1561         struct htb_sched *q = qdisc_priv(sch);
1562         struct htb_class *cl;
1563         unsigned int i;
1564 
1565         if (arg->stop)
1566                 return;
1567 
1568         for (i = 0; i < q->clhash.hashsize; i++) {
1569                 hlist_for_each_entry(cl, &q->clhash.hash[i], common.hnode) {
1570                         if (arg->count < arg->skip) {
1571                                 arg->count++;
1572                                 continue;
1573                         }
1574                         if (arg->fn(sch, (unsigned long)cl, arg) < 0) {
1575                                 arg->stop = 1;
1576                                 return;
1577                         }
1578                         arg->count++;
1579                 }
1580         }
1581 }
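     /* Standard qdisc walker contract: skip the first arg->skip classes,
      * call arg->fn() on the rest, and abort (setting arg->stop) as soon as
      * fn returns a negative value.
      */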
1582 
1583 static const struct Qdisc_class_ops htb_class_ops = {
1584         .graft          =       htb_graft,
1585         .leaf           =       htb_leaf,
1586         .qlen_notify    =       htb_qlen_notify,
1587         .get            =       htb_get,
1588         .put            =       htb_put,
1589         .change         =       htb_change_class,
1590         .delete         =       htb_delete,
1591         .walk           =       htb_walk,
1592         .tcf_chain      =       htb_find_tcf,
1593         .bind_tcf       =       htb_bind_filter,
1594         .unbind_tcf     =       htb_unbind_filter,
1595         .dump           =       htb_dump_class,
1596         .dump_stats     =       htb_dump_class_stats,
1597 };
1598 
1599 static struct Qdisc_ops htb_qdisc_ops __read_mostly = {
1600         .cl_ops         =       &htb_class_ops,
1601         .id             =       "htb",
1602         .priv_size      =       sizeof(struct htb_sched),
1603         .enqueue        =       htb_enqueue,
1604         .dequeue        =       htb_dequeue,
1605         .peek           =       qdisc_peek_dequeued,
1606         .drop           =       htb_drop,
1607         .init           =       htb_init,
1608         .reset          =       htb_reset,
1609         .destroy        =       htb_destroy,
1610         .dump           =       htb_dump,
1611         .owner          =       THIS_MODULE,
1612 };
1613 
1614 static int __init htb_module_init(void)
1615 {
1616         return register_qdisc(&htb_qdisc_ops);
1617 }
1618 static void __exit htb_module_exit(void)
1619 {
1620         unregister_qdisc(&htb_qdisc_ops);
1621 }
1622 
1623 module_init(htb_module_init)
1624 module_exit(htb_module_exit)
1625 MODULE_LICENSE("GPL");
1626 
