
Linux/net/core/dst.c

/*
 * net/core/dst.c       Protocol independent destination cache.
 *
 * Authors:             Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>
#include <linux/types.h>
#include <net/net_namespace.h>
#include <linux/sched.h>
#include <linux/prefetch.h>

#include <net/dst.h>

/*
 * Theory of operations:
 * 1) We use a list, protected by a spinlock, to add
 *    new entries from both BH and non-BH contexts.
 * 2) To keep the spinlock held for as short a time as possible,
 *    we use a second list where long-lived entries are stored;
 *    these are handled by the garbage-collect task fired from
 *    a workqueue.
 * 3) This second list is guarded by a mutex, so that gc_task()
 *    and dst_dev_event() can be synchronized.
 */
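
/*
 * Illustrative flow (a sketch of how the pieces below fit together, not
 * code from this file): a protocol drops an entry with __dst_free(), which
 * links it on dst_garbage.list under the spinlock and (re)arms dst_gc_work;
 * dst_gc_task() then moves entries that are still referenced onto the
 * mutex-guarded dst_busy_list and destroys the rest via dst_destroy().
 */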

/*
 * We want to keep the lock & list close together
 * to dirty as few cache lines as possible in __dst_free().
 * As this is not a very strong hint, we don't force an alignment on SMP.
 */
static struct {
        spinlock_t              lock;
        struct dst_entry        *list;
        unsigned long           timer_inc;
        unsigned long           timer_expires;
} dst_garbage = {
        .lock = __SPIN_LOCK_UNLOCKED(dst_garbage.lock),
        .timer_inc = DST_GC_MAX,
};
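
/*
 * Note: the DST_GC_* backoff constants come from <net/dst.h>; in kernels of
 * this era they are believed to be DST_GC_MIN = HZ/10, DST_GC_INC = HZ/2 and
 * DST_GC_MAX = 120*HZ, i.e. the worker re-arms itself somewhere between
 * 100ms and two minutes.
 */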
static void dst_gc_task(struct work_struct *work);
static void ___dst_free(struct dst_entry *dst);

static DECLARE_DELAYED_WORK(dst_gc_work, dst_gc_task);

static DEFINE_MUTEX(dst_gc_mutex);
/*
 * Long-lived entries are maintained in this list, guarded by dst_gc_mutex.
 */
static struct dst_entry         *dst_busy_list;
static void dst_gc_task(struct work_struct *work)
{
        int    delayed = 0;
        int    work_performed = 0;
        unsigned long expires = ~0L;
        struct dst_entry *dst, *next, head;
        struct dst_entry *last = &head;

        mutex_lock(&dst_gc_mutex);
        next = dst_busy_list;

loop:
        while ((dst = next) != NULL) {
                next = dst->next;
                prefetch(&next->next);
                cond_resched();
                if (likely(atomic_read(&dst->__refcnt))) {
                        last->next = dst;
                        last = dst;
                        delayed++;
                        continue;
                }
                work_performed++;

                dst = dst_destroy(dst);
                if (dst) {
                        /* NOHASH and still referenced. Unless it is already
                         * on the gc list, invalidate it and add it to the
                         * gc list.
                         *
                         * Note: this is temporary. Actually, NOHASH dst's
                         * must be obsoleted when their parent is obsoleted.
                         * But we have no state for "obsoleted, but still
                         * referenced by parent", so this is the right thing
                         * to do for now.
                         */
                        if (dst->obsolete > 0)
                                continue;

                        ___dst_free(dst);
                        dst->next = next;
                        next = dst;
                }
        }

        spin_lock_bh(&dst_garbage.lock);
        next = dst_garbage.list;
        if (next) {
                dst_garbage.list = NULL;
                spin_unlock_bh(&dst_garbage.lock);
                goto loop;
        }
        last->next = NULL;
        dst_busy_list = head.next;
        if (!dst_busy_list)
                dst_garbage.timer_inc = DST_GC_MAX;
        else {
                /*
                 * If we freed less than 1/10 of the delayed entries,
                 * we can sleep longer.
                 */
                if (work_performed <= delayed/10) {
                        dst_garbage.timer_expires += dst_garbage.timer_inc;
                        if (dst_garbage.timer_expires > DST_GC_MAX)
                                dst_garbage.timer_expires = DST_GC_MAX;
                        dst_garbage.timer_inc += DST_GC_INC;
                } else {
                        dst_garbage.timer_inc = DST_GC_INC;
                        dst_garbage.timer_expires = DST_GC_MIN;
                }
                expires = dst_garbage.timer_expires;
                /*
                 * If the next desired timer is more than 4 seconds in the
                 * future then round the timer to whole seconds.
                 */
                if (expires > 4*HZ)
                        expires = round_jiffies_relative(expires);
                schedule_delayed_work(&dst_gc_work, expires);
        }

        spin_unlock_bh(&dst_garbage.lock);
        mutex_unlock(&dst_gc_mutex);
}
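
/*
 * Worked example of the schedule above (assuming the DST_GC_* values from
 * <net/dst.h>): while busy entries remain but little is freed, successive
 * passes sleep timer_expires = DST_GC_MIN, then += DST_GC_INC, += 2*DST_GC_INC
 * and so on, saturating at DST_GC_MAX; a single productive pass (more than
 * 1/10 of the delayed entries freed) snaps the period back to DST_GC_MIN.
 */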

int dst_discard_sk(struct sock *sk, struct sk_buff *skb)
{
        kfree_skb(skb);
        return 0;
}
EXPORT_SYMBOL(dst_discard_sk);

const u32 dst_default_metrics[RTAX_MAX + 1] = {
        /* This initializer is needed to force the linker to place this
         * variable into the const section. Otherwise it might end up in
         * the .bss section. We really want to avoid false sharing on this
         * variable, and to catch any writes to it.
         */
        [RTAX_MAX] = 0xdeadbeef,
};

void *dst_alloc(struct dst_ops *ops, struct net_device *dev,
                int initial_ref, int initial_obsolete, unsigned short flags)
{
        struct dst_entry *dst;

        if (ops->gc && dst_entries_get_fast(ops) > ops->gc_thresh) {
                if (ops->gc(ops))
                        return NULL;
        }
        dst = kmem_cache_alloc(ops->kmem_cachep, GFP_ATOMIC);
        if (!dst)
                return NULL;
        dst->child = NULL;
        dst->dev = dev;
        if (dev)
                dev_hold(dev);
        dst->ops = ops;
        dst_init_metrics(dst, dst_default_metrics, true);
        dst->expires = 0UL;
        dst->path = dst;
        dst->from = NULL;
#ifdef CONFIG_XFRM
        dst->xfrm = NULL;
#endif
        dst->input = dst_discard;
        dst->output = dst_discard_sk;
        dst->error = 0;
        dst->obsolete = initial_obsolete;
        dst->header_len = 0;
        dst->trailer_len = 0;
#ifdef CONFIG_IP_ROUTE_CLASSID
        dst->tclassid = 0;
#endif
        atomic_set(&dst->__refcnt, initial_ref);
        dst->__use = 0;
        dst->lastuse = jiffies;
        dst->flags = flags;
        dst->pending_confirm = 0;
        dst->next = NULL;
        if (!(flags & DST_NOCOUNT))
                dst_entries_add(ops, 1);
        return dst;
}
EXPORT_SYMBOL(dst_alloc);
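
/*
 * Usage sketch (illustrative, not part of this file): a protocol with its
 * own struct dst_ops wraps this allocator, roughly as IPv4 does:
 *
 *	rt = dst_alloc(&ipv4_dst_ops, dev, 1, DST_OBSOLETE_FORCE_CHK, flags);
 *
 * taking one initial reference, and relying on ops->gc() above to throttle
 * allocations once gc_thresh is exceeded.
 */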

static void ___dst_free(struct dst_entry *dst)
{
        /* The first case (dev == NULL) is required when a
         * protocol module is unloaded.
         */
        if (dst->dev == NULL || !(dst->dev->flags&IFF_UP)) {
                dst->input = dst_discard;
                dst->output = dst_discard_sk;
        }
        dst->obsolete = DST_OBSOLETE_DEAD;
}

void __dst_free(struct dst_entry *dst)
{
        spin_lock_bh(&dst_garbage.lock);
        ___dst_free(dst);
        dst->next = dst_garbage.list;
        dst_garbage.list = dst;
        if (dst_garbage.timer_inc > DST_GC_INC) {
                dst_garbage.timer_inc = DST_GC_INC;
                dst_garbage.timer_expires = DST_GC_MIN;
                mod_delayed_work(system_wq, &dst_gc_work,
                                 dst_garbage.timer_expires);
        }
        spin_unlock_bh(&dst_garbage.lock);
}
EXPORT_SYMBOL(__dst_free);
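
/*
 * Note: most callers are assumed to reach this through the dst_free()
 * wrapper in <net/dst.h>, which short-circuits entries that are already
 * dead or unreferenced; entries queued here are only reclaimed once
 * dst_gc_task() sees their refcount reach zero.
 */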

struct dst_entry *dst_destroy(struct dst_entry *dst)
{
        struct dst_entry *child;

        smp_rmb();

again:
        child = dst->child;

        if (!(dst->flags & DST_NOCOUNT))
                dst_entries_add(dst->ops, -1);

        if (dst->ops->destroy)
                dst->ops->destroy(dst);
        if (dst->dev)
                dev_put(dst->dev);
        kmem_cache_free(dst->ops->kmem_cachep, dst);

        dst = child;
        if (dst) {
                int nohash = dst->flags & DST_NOHASH;

                if (atomic_dec_and_test(&dst->__refcnt)) {
                        /* We were the real parent of this dst, so kill the
                         * child too. */
                        if (nohash)
                                goto again;
                } else {
                        /* Child is still referenced; return it for freeing. */
                        if (nohash)
                                return dst;
                        /* Child is still in its hash table. */
                }
        }
        return NULL;
}
EXPORT_SYMBOL(dst_destroy);
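
/*
 * Background note: stacked entries (e.g. xfrm bundles) are chained through
 * dst->child, so destroying a parent walks down the chain: a NOHASH child
 * whose refcount drops to zero is killed in the same pass ("goto again"),
 * while a still-referenced NOHASH child is handed back to the caller for
 * deferred freeing.
 */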

static void dst_destroy_rcu(struct rcu_head *head)
{
        struct dst_entry *dst = container_of(head, struct dst_entry, rcu_head);

        dst = dst_destroy(dst);
        if (dst)
                __dst_free(dst);
}

void dst_release(struct dst_entry *dst)
{
        if (dst) {
                int newrefcnt;

                newrefcnt = atomic_dec_return(&dst->__refcnt);
                WARN_ON(newrefcnt < 0);
                if (unlikely(dst->flags & DST_NOCACHE) && !newrefcnt)
                        call_rcu(&dst->rcu_head, dst_destroy_rcu);
        }
}
EXPORT_SYMBOL(dst_release);
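
/*
 * DST_NOCACHE entries live outside any protocol hash, so the final
 * dst_release() is the last owner; freeing is deferred through call_rcu()
 * because a reader inside rcu_read_lock() (for instance one that attached
 * the entry with skb_dst_set_noref()) may still be dereferencing it.
 */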

u32 *dst_cow_metrics_generic(struct dst_entry *dst, unsigned long old)
{
        u32 *p = kmalloc(sizeof(u32) * RTAX_MAX, GFP_ATOMIC);

        if (p) {
                u32 *old_p = __DST_METRICS_PTR(old);
                unsigned long prev, new;

                memcpy(p, old_p, sizeof(u32) * RTAX_MAX);

                new = (unsigned long) p;
                prev = cmpxchg(&dst->_metrics, old, new);

                if (prev != old) {
                        kfree(p);
                        p = __DST_METRICS_PTR(prev);
                        if (prev & DST_METRICS_READ_ONLY)
                                p = NULL;
                }
        }
        return p;
}
EXPORT_SYMBOL(dst_cow_metrics_generic);
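
/*
 * Usage sketch (hedged; helper names are from <net/dst.h> of this era):
 * writers go through dst_metrics_write_ptr(), which invokes
 * ops->cow_metrics (commonly this function) while _metrics still points at
 * the shared read-only dst_default_metrics table, e.g.
 *
 *	dst_metric_set(dst, RTAX_MTU, 1400);
 *
 * copies the table once and then patches only the private copy.
 */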

/* Caller asserts that dst_metrics_read_only(dst) is false.  */
void __dst_destroy_metrics_generic(struct dst_entry *dst, unsigned long old)
{
        unsigned long prev, new;

        new = ((unsigned long) dst_default_metrics) | DST_METRICS_READ_ONLY;
        prev = cmpxchg(&dst->_metrics, old, new);
        if (prev == old)
                kfree(__DST_METRICS_PTR(old));
}
EXPORT_SYMBOL(__dst_destroy_metrics_generic);

/**
 * __skb_dst_set_noref - sets skb dst, without a reference
 * @skb: buffer
 * @dst: dst entry
 * @force: if force is set, use the noref version even for DST_NOCACHE entries
 *
 * Sets the skb dst, assuming a reference was not taken on dst.
 * skb_dst_drop() should not dst_release() this dst.
 */
void __skb_dst_set_noref(struct sk_buff *skb, struct dst_entry *dst, bool force)
{
        WARN_ON(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
        /* If dst is not cached, we must take a reference, because
         * dst_release() will destroy dst as soon as its refcount
         * becomes zero.
         */
        if (unlikely((dst->flags & DST_NOCACHE) && !force)) {
                dst_hold(dst);
                skb_dst_set(skb, dst);
        } else {
                skb->_skb_refdst = (unsigned long)dst | SKB_DST_NOREF;
        }
}
EXPORT_SYMBOL(__skb_dst_set_noref);
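
/*
 * Callers normally use the <net/dst.h> wrappers (assumed from kernels of
 * this era): skb_dst_set_noref(skb, dst) passes force=false, while
 * skb_dst_set_noref_force() passes force=true to skip the DST_NOCACHE
 * escape hatch above.
 */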

/* Dirty hack. We did it in 2.2 (in __dst_free),
 * we have _very_ good reasons not to repeat
 * this mistake in 2.3, but we have no choice
 * now. _It_ _is_ _explicit_ _deliberate_
 * _race_ _condition_.
 *
 * Commented and originally written by Alexey.
 */
static void dst_ifdown(struct dst_entry *dst, struct net_device *dev,
                       int unregister)
{
        if (dst->ops->ifdown)
                dst->ops->ifdown(dst, dev, unregister);

        if (dev != dst->dev)
                return;

        if (!unregister) {
                dst->input = dst_discard;
                dst->output = dst_discard_sk;
        } else {
                dst->dev = dev_net(dst->dev)->loopback_dev;
                dev_hold(dst->dev);
                dev_put(dev);
        }
}
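
/*
 * Rebinding dst->dev to loopback_dev on unregister keeps the pointer aimed
 * at a device that can never go away, so stale entries still sitting on the
 * GC lists remain safe to dereference after the original device is gone.
 */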

static int dst_dev_event(struct notifier_block *this, unsigned long event,
                         void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct dst_entry *dst, *last = NULL;

        switch (event) {
        case NETDEV_UNREGISTER_FINAL:
        case NETDEV_DOWN:
                mutex_lock(&dst_gc_mutex);
                for (dst = dst_busy_list; dst; dst = dst->next) {
                        last = dst;
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                }

                spin_lock_bh(&dst_garbage.lock);
                dst = dst_garbage.list;
                dst_garbage.list = NULL;
                spin_unlock_bh(&dst_garbage.lock);

                if (last)
                        last->next = dst;
                else
                        dst_busy_list = dst;
                for (; dst; dst = dst->next)
                        dst_ifdown(dst, dev, event != NETDEV_DOWN);
                mutex_unlock(&dst_gc_mutex);
                break;
        }
        return NOTIFY_DONE;
}

static struct notifier_block dst_dev_notifier = {
        .notifier_call  = dst_dev_event,
        .priority = -10, /* must be called after other network notifiers */
};

void __init dst_init(void)
{
        register_netdevice_notifier(&dst_dev_notifier);
}
