
Linux/net/core/dev.c

  1 /*
  2  *      NET3    Protocol independent device support routines.
  3  *
  4  *              This program is free software; you can redistribute it and/or
  5  *              modify it under the terms of the GNU General Public License
  6  *              as published by the Free Software Foundation; either version
  7  *              2 of the License, or (at your option) any later version.
  8  *
  9  *      Derived from the non IP parts of dev.c 1.0.19
 10  *              Authors:        Ross Biro
 11  *                              Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 12  *                              Mark Evans, <evansmp@uhura.aston.ac.uk>
 13  *
 14  *      Additional Authors:
 15  *              Florian la Roche <rzsfl@rz.uni-sb.de>
 16  *              Alan Cox <gw4pts@gw4pts.ampr.org>
 17  *              David Hinds <dahinds@users.sourceforge.net>
 18  *              Alexey Kuznetsov <kuznet@ms2.inr.ac.ru>
 19  *              Adam Sulmicki <adam@cfar.umd.edu>
 20  *              Pekka Riikonen <priikone@poesidon.pspt.fi>
 21  *
 22  *      Changes:
 23  *              D.J. Barrow     :       Fixed bug where dev->refcnt gets set
 24  *                                      to 2 if register_netdev gets called
 25  *                                      before net_dev_init & also removed a
 26  *                                      few lines of code in the process.
 27  *              Alan Cox        :       device private ioctl copies fields back.
 28  *              Alan Cox        :       Transmit queue code does relevant
 29  *                                      stunts to keep the queue safe.
 30  *              Alan Cox        :       Fixed double lock.
 31  *              Alan Cox        :       Fixed promisc NULL pointer trap
 32  *              ????????        :       Support the full private ioctl range
 33  *              Alan Cox        :       Moved ioctl permission check into
 34  *                                      drivers
 35  *              Tim Kordas      :       SIOCADDMULTI/SIOCDELMULTI
 36  *              Alan Cox        :       100 backlog just doesn't cut it when
 37  *                                      you start doing multicast video 8)
 38  *              Alan Cox        :       Rewrote net_bh and list manager.
 39  *              Alan Cox        :       Fix ETH_P_ALL echoback lengths.
 40  *              Alan Cox        :       Took out transmit every packet pass
 41  *                                      Saved a few bytes in the ioctl handler
 42  *              Alan Cox        :       Network driver sets packet type before
 43  *                                      calling netif_rx. Saves a function
 44  *                                      call a packet.
 45  *              Alan Cox        :       Hashed net_bh()
 46  *              Richard Kooijman:       Timestamp fixes.
 47  *              Alan Cox        :       Wrong field in SIOCGIFDSTADDR
 48  *              Alan Cox        :       Device lock protection.
 49  *              Alan Cox        :       Fixed nasty side effect of device close
 50  *                                      changes.
 51  *              Rudi Cilibrasi  :       Pass the right thing to
 52  *                                      set_mac_address()
 53  *              Dave Miller     :       32bit quantity for the device lock to
 54  *                                      make it work out on a Sparc.
 55  *              Bjorn Ekwall    :       Added KERNELD hack.
 56  *              Alan Cox        :       Cleaned up the backlog initialise.
 57  *              Craig Metz      :       SIOCGIFCONF fix if space for under
 58  *                                      1 device.
 59  *          Thomas Bogendoerfer :       Return ENODEV for dev_open, if there
 60  *                                      is no device open function.
 61  *              Andi Kleen      :       Fix error reporting for SIOCGIFCONF
 62  *          Michael Chastain    :       Fix signed/unsigned for SIOCGIFCONF
 63  *              Cyrus Durgin    :       Cleaned for KMOD
 64  *              Adam Sulmicki   :       Bug Fix : Network Device Unload
 65  *                                      A network device unload needs to purge
 66  *                                      the backlog queue.
 67  *      Paul Rusty Russell      :       SIOCSIFNAME
 68  *              Pekka Riikonen  :       Netdev boot-time settings code
 69  *              Andrew Morton   :       Make unregister_netdevice wait
 70  *                                      indefinitely on dev->refcnt
 71  *              J Hadi Salim    :       - Backlog queue sampling
 72  *                                      - netif_rx() feedback
 73  */
 74 
 75 #include <asm/uaccess.h>
 76 #include <linux/bitops.h>
 77 #include <linux/capability.h>
 78 #include <linux/cpu.h>
 79 #include <linux/types.h>
 80 #include <linux/kernel.h>
 81 #include <linux/hash.h>
 82 #include <linux/slab.h>
 83 #include <linux/sched.h>
 84 #include <linux/mutex.h>
 85 #include <linux/string.h>
 86 #include <linux/mm.h>
 87 #include <linux/socket.h>
 88 #include <linux/sockios.h>
 89 #include <linux/errno.h>
 90 #include <linux/interrupt.h>
 91 #include <linux/if_ether.h>
 92 #include <linux/netdevice.h>
 93 #include <linux/etherdevice.h>
 94 #include <linux/ethtool.h>
 95 #include <linux/notifier.h>
 96 #include <linux/skbuff.h>
 97 #include <net/net_namespace.h>
 98 #include <net/sock.h>
 99 #include <linux/rtnetlink.h>
100 #include <linux/stat.h>
101 #include <net/dst.h>
102 #include <net/pkt_sched.h>
103 #include <net/checksum.h>
104 #include <net/xfrm.h>
105 #include <linux/highmem.h>
106 #include <linux/init.h>
107 #include <linux/module.h>
108 #include <linux/netpoll.h>
109 #include <linux/rcupdate.h>
110 #include <linux/delay.h>
111 #include <net/iw_handler.h>
112 #include <asm/current.h>
113 #include <linux/audit.h>
114 #include <linux/dmaengine.h>
115 #include <linux/err.h>
116 #include <linux/ctype.h>
117 #include <linux/if_arp.h>
118 #include <linux/if_vlan.h>
119 #include <linux/ip.h>
120 #include <net/ip.h>
121 #include <net/mpls.h>
122 #include <linux/ipv6.h>
123 #include <linux/in.h>
124 #include <linux/jhash.h>
125 #include <linux/random.h>
126 #include <trace/events/napi.h>
127 #include <trace/events/net.h>
128 #include <trace/events/skb.h>
129 #include <linux/pci.h>
130 #include <linux/inetdevice.h>
131 #include <linux/cpu_rmap.h>
132 #include <linux/static_key.h>
133 #include <linux/hashtable.h>
134 #include <linux/vmalloc.h>
135 #include <linux/if_macvlan.h>
136 #include <linux/errqueue.h>
137 #include <linux/hrtimer.h>
138 
139 #include "net-sysfs.h"
140 
141 /* Instead of increasing this, you should create a hash table. */
142 #define MAX_GRO_SKBS 8
143 
144 /* This should be increased if a protocol with a bigger head is added. */
145 #define GRO_MAX_HEAD (MAX_HEADER + 128)
146 
147 static DEFINE_SPINLOCK(ptype_lock);
148 static DEFINE_SPINLOCK(offload_lock);
149 struct list_head ptype_base[PTYPE_HASH_SIZE] __read_mostly;
150 struct list_head ptype_all __read_mostly;       /* Taps */
151 static struct list_head offload_base __read_mostly;
152 
153 static int netif_rx_internal(struct sk_buff *skb);
154 static int call_netdevice_notifiers_info(unsigned long val,
155                                          struct net_device *dev,
156                                          struct netdev_notifier_info *info);
157 
158 /*
159  * The @dev_base_head list is protected by @dev_base_lock and the rtnl
160  * semaphore.
161  *
162  * Pure readers hold dev_base_lock for reading, or rcu_read_lock()
163  *
164  * Writers must hold the rtnl semaphore while they loop through the
165  * dev_base_head list, and hold dev_base_lock for writing when they do the
166  * actual updates.  This allows pure readers to access the list even
167  * while a writer is preparing to update it.
168  *
169  * To put it another way, dev_base_lock is held for writing only to
170  * protect against pure readers; the rtnl semaphore provides the
171  * protection against other writers.
172  *
173  * See, for example usages, register_netdevice() and
174  * unregister_netdevice(), which must be called with the rtnl
175  * semaphore held.
176  */
177 DEFINE_RWLOCK(dev_base_lock);
178 EXPORT_SYMBOL(dev_base_lock);
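
/*
 * Illustrative sketch of the reader-side rule described above: a pure
 * reader may walk the device list under rcu_read_lock() (as below with
 * for_each_netdev_rcu()) or, alternatively, under read_lock(&dev_base_lock)
 * with for_each_netdev().  The helper name is hypothetical.
 */
static int example_count_devices(struct net *net)
{
        struct net_device *dev;
        int count = 0;

        rcu_read_lock();
        for_each_netdev_rcu(net, dev)
                count++;
        rcu_read_unlock();

        return count;
}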
179 
180 /* protects napi_hash addition/deletion and napi_gen_id */
181 static DEFINE_SPINLOCK(napi_hash_lock);
182 
183 static unsigned int napi_gen_id;
184 static DEFINE_HASHTABLE(napi_hash, 8);
185 
186 static seqcount_t devnet_rename_seq;
187 
188 static inline void dev_base_seq_inc(struct net *net)
189 {
190         while (++net->dev_base_seq == 0);
191 }
192 
193 static inline struct hlist_head *dev_name_hash(struct net *net, const char *name)
194 {
195         unsigned int hash = full_name_hash(name, strnlen(name, IFNAMSIZ));
196 
197         return &net->dev_name_head[hash_32(hash, NETDEV_HASHBITS)];
198 }
199 
200 static inline struct hlist_head *dev_index_hash(struct net *net, int ifindex)
201 {
202         return &net->dev_index_head[ifindex & (NETDEV_HASHENTRIES - 1)];
203 }
204 
205 static inline void rps_lock(struct softnet_data *sd)
206 {
207 #ifdef CONFIG_RPS
208         spin_lock(&sd->input_pkt_queue.lock);
209 #endif
210 }
211 
212 static inline void rps_unlock(struct softnet_data *sd)
213 {
214 #ifdef CONFIG_RPS
215         spin_unlock(&sd->input_pkt_queue.lock);
216 #endif
217 }
218 
219 /* Device list insertion */
220 static void list_netdevice(struct net_device *dev)
221 {
222         struct net *net = dev_net(dev);
223 
224         ASSERT_RTNL();
225 
226         write_lock_bh(&dev_base_lock);
227         list_add_tail_rcu(&dev->dev_list, &net->dev_base_head);
228         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
229         hlist_add_head_rcu(&dev->index_hlist,
230                            dev_index_hash(net, dev->ifindex));
231         write_unlock_bh(&dev_base_lock);
232 
233         dev_base_seq_inc(net);
234 }
235 
236 /* Device list removal
 237  * caller must respect an RCU grace period before freeing/reusing dev
238  */
239 static void unlist_netdevice(struct net_device *dev)
240 {
241         ASSERT_RTNL();
242 
243         /* Unlink dev from the device chain */
244         write_lock_bh(&dev_base_lock);
245         list_del_rcu(&dev->dev_list);
246         hlist_del_rcu(&dev->name_hlist);
247         hlist_del_rcu(&dev->index_hlist);
248         write_unlock_bh(&dev_base_lock);
249 
250         dev_base_seq_inc(dev_net(dev));
251 }
252 
253 /*
254  *      Our notifier list
255  */
256 
257 static RAW_NOTIFIER_HEAD(netdev_chain);
258 
259 /*
260  *      Device drivers call our routines to queue packets here. We empty the
261  *      queue in the local softnet handler.
262  */
263 
264 DEFINE_PER_CPU_ALIGNED(struct softnet_data, softnet_data);
265 EXPORT_PER_CPU_SYMBOL(softnet_data);
266 
267 #ifdef CONFIG_LOCKDEP
268 /*
269  * register_netdevice() inits txq->_xmit_lock and sets lockdep class
270  * according to dev->type
271  */
272 static const unsigned short netdev_lock_type[] =
273         {ARPHRD_NETROM, ARPHRD_ETHER, ARPHRD_EETHER, ARPHRD_AX25,
274          ARPHRD_PRONET, ARPHRD_CHAOS, ARPHRD_IEEE802, ARPHRD_ARCNET,
275          ARPHRD_APPLETLK, ARPHRD_DLCI, ARPHRD_ATM, ARPHRD_METRICOM,
276          ARPHRD_IEEE1394, ARPHRD_EUI64, ARPHRD_INFINIBAND, ARPHRD_SLIP,
277          ARPHRD_CSLIP, ARPHRD_SLIP6, ARPHRD_CSLIP6, ARPHRD_RSRVD,
278          ARPHRD_ADAPT, ARPHRD_ROSE, ARPHRD_X25, ARPHRD_HWX25,
279          ARPHRD_PPP, ARPHRD_CISCO, ARPHRD_LAPB, ARPHRD_DDCMP,
280          ARPHRD_RAWHDLC, ARPHRD_TUNNEL, ARPHRD_TUNNEL6, ARPHRD_FRAD,
281          ARPHRD_SKIP, ARPHRD_LOOPBACK, ARPHRD_LOCALTLK, ARPHRD_FDDI,
282          ARPHRD_BIF, ARPHRD_SIT, ARPHRD_IPDDP, ARPHRD_IPGRE,
283          ARPHRD_PIMREG, ARPHRD_HIPPI, ARPHRD_ASH, ARPHRD_ECONET,
284          ARPHRD_IRDA, ARPHRD_FCPP, ARPHRD_FCAL, ARPHRD_FCPL,
285          ARPHRD_FCFABRIC, ARPHRD_IEEE80211, ARPHRD_IEEE80211_PRISM,
286          ARPHRD_IEEE80211_RADIOTAP, ARPHRD_PHONET, ARPHRD_PHONET_PIPE,
287          ARPHRD_IEEE802154, ARPHRD_VOID, ARPHRD_NONE};
288 
289 static const char *const netdev_lock_name[] =
290         {"_xmit_NETROM", "_xmit_ETHER", "_xmit_EETHER", "_xmit_AX25",
291          "_xmit_PRONET", "_xmit_CHAOS", "_xmit_IEEE802", "_xmit_ARCNET",
292          "_xmit_APPLETLK", "_xmit_DLCI", "_xmit_ATM", "_xmit_METRICOM",
293          "_xmit_IEEE1394", "_xmit_EUI64", "_xmit_INFINIBAND", "_xmit_SLIP",
294          "_xmit_CSLIP", "_xmit_SLIP6", "_xmit_CSLIP6", "_xmit_RSRVD",
295          "_xmit_ADAPT", "_xmit_ROSE", "_xmit_X25", "_xmit_HWX25",
296          "_xmit_PPP", "_xmit_CISCO", "_xmit_LAPB", "_xmit_DDCMP",
297          "_xmit_RAWHDLC", "_xmit_TUNNEL", "_xmit_TUNNEL6", "_xmit_FRAD",
298          "_xmit_SKIP", "_xmit_LOOPBACK", "_xmit_LOCALTLK", "_xmit_FDDI",
299          "_xmit_BIF", "_xmit_SIT", "_xmit_IPDDP", "_xmit_IPGRE",
300          "_xmit_PIMREG", "_xmit_HIPPI", "_xmit_ASH", "_xmit_ECONET",
301          "_xmit_IRDA", "_xmit_FCPP", "_xmit_FCAL", "_xmit_FCPL",
302          "_xmit_FCFABRIC", "_xmit_IEEE80211", "_xmit_IEEE80211_PRISM",
303          "_xmit_IEEE80211_RADIOTAP", "_xmit_PHONET", "_xmit_PHONET_PIPE",
304          "_xmit_IEEE802154", "_xmit_VOID", "_xmit_NONE"};
305 
306 static struct lock_class_key netdev_xmit_lock_key[ARRAY_SIZE(netdev_lock_type)];
307 static struct lock_class_key netdev_addr_lock_key[ARRAY_SIZE(netdev_lock_type)];
308 
309 static inline unsigned short netdev_lock_pos(unsigned short dev_type)
310 {
311         int i;
312 
313         for (i = 0; i < ARRAY_SIZE(netdev_lock_type); i++)
314                 if (netdev_lock_type[i] == dev_type)
315                         return i;
316         /* the last key is used by default */
317         return ARRAY_SIZE(netdev_lock_type) - 1;
318 }
319 
320 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
321                                                  unsigned short dev_type)
322 {
323         int i;
324 
325         i = netdev_lock_pos(dev_type);
326         lockdep_set_class_and_name(lock, &netdev_xmit_lock_key[i],
327                                    netdev_lock_name[i]);
328 }
329 
330 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
331 {
332         int i;
333 
334         i = netdev_lock_pos(dev->type);
335         lockdep_set_class_and_name(&dev->addr_list_lock,
336                                    &netdev_addr_lock_key[i],
337                                    netdev_lock_name[i]);
338 }
339 #else
340 static inline void netdev_set_xmit_lockdep_class(spinlock_t *lock,
341                                                  unsigned short dev_type)
342 {
343 }
344 static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
345 {
346 }
347 #endif
348 
349 /*******************************************************************************
350 
351                 Protocol management and registration routines
352 
353 *******************************************************************************/
354 
355 /*
356  *      Add a protocol ID to the list. Now that the input handler is
357  *      smarter we can dispense with all the messy stuff that used to be
358  *      here.
359  *
 360  *      BEWARE!!! Protocol handlers that mangle input packets
 361  *      MUST BE last in the hash buckets, and checking protocol handlers
 362  *      MUST start from the promiscuous ptype_all chain in net_bh.
 363  *      This is true now; do not change it.
 364  *      Explanation: if a protocol handler that mangles packets were
 365  *      first on the list, it could not sense that the packet is
 366  *      cloned and should be copied-on-write, so it would change the
 367  *      packet and subsequent readers would get a broken packet.
 368  *                                                      --ANK (980803)
369  */
370 
371 static inline struct list_head *ptype_head(const struct packet_type *pt)
372 {
373         if (pt->type == htons(ETH_P_ALL))
374                 return pt->dev ? &pt->dev->ptype_all : &ptype_all;
375         else
376                 return pt->dev ? &pt->dev->ptype_specific :
377                                  &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
378 }
379 
380 /**
381  *      dev_add_pack - add packet handler
382  *      @pt: packet type declaration
383  *
384  *      Add a protocol handler to the networking stack. The passed &packet_type
385  *      is linked into kernel lists and may not be freed until it has been
386  *      removed from the kernel lists.
387  *
 388  *      This call does not sleep, therefore it cannot
 389  *      guarantee that all CPUs in the middle of receiving packets
 390  *      will see the new packet type (until the next received packet).
391  */
392 
393 void dev_add_pack(struct packet_type *pt)
394 {
395         struct list_head *head = ptype_head(pt);
396 
397         spin_lock(&ptype_lock);
398         list_add_rcu(&pt->list, head);
399         spin_unlock(&ptype_lock);
400 }
401 EXPORT_SYMBOL(dev_add_pack);
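
/*
 * Illustrative sketch of dev_add_pack() usage: a minimal ETH_P_ALL tap.
 * example_rcv and example_pt are hypothetical names; only .type, .dev and
 * .func of struct packet_type are shown.
 */
static int example_rcv(struct sk_buff *skb, struct net_device *dev,
                       struct packet_type *pt, struct net_device *orig_dev)
{
        /* The skb may be shared with other taps; do not modify it here. */
        consume_skb(skb);
        return NET_RX_SUCCESS;
}

static struct packet_type example_pt __read_mostly = {
        .type = cpu_to_be16(ETH_P_ALL), /* tap every protocol */
        .func = example_rcv,            /* .dev left NULL: all devices */
};

/* dev_add_pack(&example_pt); ... dev_remove_pack(&example_pt); */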
402 
403 /**
404  *      __dev_remove_pack        - remove packet handler
405  *      @pt: packet type declaration
406  *
407  *      Remove a protocol handler that was previously added to the kernel
408  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
409  *      from the kernel lists and can be freed or reused once this function
410  *      returns.
411  *
412  *      The packet type might still be in use by receivers
 413  *      and must not be freed until after all the CPUs have gone
414  *      through a quiescent state.
415  */
416 void __dev_remove_pack(struct packet_type *pt)
417 {
418         struct list_head *head = ptype_head(pt);
419         struct packet_type *pt1;
420 
421         spin_lock(&ptype_lock);
422 
423         list_for_each_entry(pt1, head, list) {
424                 if (pt == pt1) {
425                         list_del_rcu(&pt->list);
426                         goto out;
427                 }
428         }
429 
430         pr_warn("dev_remove_pack: %p not found\n", pt);
431 out:
432         spin_unlock(&ptype_lock);
433 }
434 EXPORT_SYMBOL(__dev_remove_pack);
435 
436 /**
437  *      dev_remove_pack  - remove packet handler
438  *      @pt: packet type declaration
439  *
440  *      Remove a protocol handler that was previously added to the kernel
441  *      protocol handlers by dev_add_pack(). The passed &packet_type is removed
442  *      from the kernel lists and can be freed or reused once this function
443  *      returns.
444  *
445  *      This call sleeps to guarantee that no CPU is looking at the packet
446  *      type after return.
447  */
448 void dev_remove_pack(struct packet_type *pt)
449 {
450         __dev_remove_pack(pt);
451 
452         synchronize_net();
453 }
454 EXPORT_SYMBOL(dev_remove_pack);
455 
456 
457 /**
458  *      dev_add_offload - register offload handlers
459  *      @po: protocol offload declaration
460  *
461  *      Add protocol offload handlers to the networking stack. The passed
462  *      &proto_offload is linked into kernel lists and may not be freed until
463  *      it has been removed from the kernel lists.
464  *
 465  *      This call does not sleep, therefore it cannot
 466  *      guarantee that all CPUs in the middle of receiving packets
 467  *      will see the new offload handlers (until the next received packet).
468  */
469 void dev_add_offload(struct packet_offload *po)
470 {
471         struct list_head *head = &offload_base;
472 
473         spin_lock(&offload_lock);
474         list_add_rcu(&po->list, head);
475         spin_unlock(&offload_lock);
476 }
477 EXPORT_SYMBOL(dev_add_offload);
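
/*
 * Illustrative sketch of dev_add_offload() usage.  The 0x88B5 EtherType
 * (IEEE local experimental) and the variable name are placeholders; real
 * users fill in .callbacks with their GSO/GRO handlers.
 */
static struct packet_offload example_offload __read_mostly = {
        .type = cpu_to_be16(0x88B5),
        /* .callbacks.gso_segment / .gro_receive / .gro_complete as needed */
};

/* dev_add_offload(&example_offload); ... dev_remove_offload(&example_offload); */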
478 
479 /**
480  *      __dev_remove_offload     - remove offload handler
481  *      @po: packet offload declaration
482  *
483  *      Remove a protocol offload handler that was previously added to the
484  *      kernel offload handlers by dev_add_offload(). The passed &offload_type
485  *      is removed from the kernel lists and can be freed or reused once this
486  *      function returns.
487  *
488  *      The packet type might still be in use by receivers
 489  *      and must not be freed until after all the CPUs have gone
490  *      through a quiescent state.
491  */
492 static void __dev_remove_offload(struct packet_offload *po)
493 {
494         struct list_head *head = &offload_base;
495         struct packet_offload *po1;
496 
497         spin_lock(&offload_lock);
498 
499         list_for_each_entry(po1, head, list) {
500                 if (po == po1) {
501                         list_del_rcu(&po->list);
502                         goto out;
503                 }
504         }
505 
506         pr_warn("dev_remove_offload: %p not found\n", po);
507 out:
508         spin_unlock(&offload_lock);
509 }
510 
511 /**
512  *      dev_remove_offload       - remove packet offload handler
513  *      @po: packet offload declaration
514  *
515  *      Remove a packet offload handler that was previously added to the kernel
516  *      offload handlers by dev_add_offload(). The passed &offload_type is
517  *      removed from the kernel lists and can be freed or reused once this
518  *      function returns.
519  *
520  *      This call sleeps to guarantee that no CPU is looking at the packet
521  *      type after return.
522  */
523 void dev_remove_offload(struct packet_offload *po)
524 {
525         __dev_remove_offload(po);
526 
527         synchronize_net();
528 }
529 EXPORT_SYMBOL(dev_remove_offload);
530 
531 /******************************************************************************
532 
533                       Device Boot-time Settings Routines
534 
535 *******************************************************************************/
536 
537 /* Boot time configuration table */
538 static struct netdev_boot_setup dev_boot_setup[NETDEV_BOOT_SETUP_MAX];
539 
540 /**
541  *      netdev_boot_setup_add   - add new setup entry
542  *      @name: name of the device
543  *      @map: configured settings for the device
544  *
 545  *      Adds a new setup entry to the dev_boot_setup list.  The function
 546  *      returns 0 on error and 1 on success.  This is a generic routine for
 547  *      all netdevices.
548  */
549 static int netdev_boot_setup_add(char *name, struct ifmap *map)
550 {
551         struct netdev_boot_setup *s;
552         int i;
553 
554         s = dev_boot_setup;
555         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
556                 if (s[i].name[0] == '\0' || s[i].name[0] == ' ') {
557                         memset(s[i].name, 0, sizeof(s[i].name));
558                         strlcpy(s[i].name, name, IFNAMSIZ);
559                         memcpy(&s[i].map, map, sizeof(s[i].map));
560                         break;
561                 }
562         }
563 
564         return i >= NETDEV_BOOT_SETUP_MAX ? 0 : 1;
565 }
566 
567 /**
568  *      netdev_boot_setup_check - check boot time settings
569  *      @dev: the netdevice
570  *
 571  *      Check the boot-time settings for the device.
 572  *      Any settings found are applied to the device so they can be used
 573  *      later during device probing.
 574  *      Returns 0 if no settings are found, 1 if they are.
575  */
576 int netdev_boot_setup_check(struct net_device *dev)
577 {
578         struct netdev_boot_setup *s = dev_boot_setup;
579         int i;
580 
581         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++) {
582                 if (s[i].name[0] != '\0' && s[i].name[0] != ' ' &&
583                     !strcmp(dev->name, s[i].name)) {
584                         dev->irq        = s[i].map.irq;
585                         dev->base_addr  = s[i].map.base_addr;
586                         dev->mem_start  = s[i].map.mem_start;
587                         dev->mem_end    = s[i].map.mem_end;
588                         return 1;
589                 }
590         }
591         return 0;
592 }
593 EXPORT_SYMBOL(netdev_boot_setup_check);
594 
595 
596 /**
597  *      netdev_boot_base        - get address from boot time settings
598  *      @prefix: prefix for network device
599  *      @unit: id for network device
600  *
 601  *      Check the boot-time settings for the base address of the device.
 602  *      Any settings found are applied to the device so they can be used
 603  *      later during device probing.
 604  *      Returns 0 if no settings are found.
605  */
606 unsigned long netdev_boot_base(const char *prefix, int unit)
607 {
608         const struct netdev_boot_setup *s = dev_boot_setup;
609         char name[IFNAMSIZ];
610         int i;
611 
612         sprintf(name, "%s%d", prefix, unit);
613 
614         /*
615          * If device already registered then return base of 1
616          * to indicate not to probe for this interface
617          */
618         if (__dev_get_by_name(&init_net, name))
619                 return 1;
620 
621         for (i = 0; i < NETDEV_BOOT_SETUP_MAX; i++)
622                 if (!strcmp(name, s[i].name))
623                         return s[i].map.base_addr;
624         return 0;
625 }
626 
627 /*
 628  * Saves the settings configured at boot time for any netdevice.
629  */
630 int __init netdev_boot_setup(char *str)
631 {
632         int ints[5];
633         struct ifmap map;
634 
635         str = get_options(str, ARRAY_SIZE(ints), ints);
636         if (!str || !*str)
637                 return 0;
638 
639         /* Save settings */
640         memset(&map, 0, sizeof(map));
641         if (ints[0] > 0)
642                 map.irq = ints[1];
643         if (ints[0] > 1)
644                 map.base_addr = ints[2];
645         if (ints[0] > 2)
646                 map.mem_start = ints[3];
647         if (ints[0] > 3)
648                 map.mem_end = ints[4];
649 
650         /* Add new entry to the list */
651         return netdev_boot_setup_add(str, &map);
652 }
653 
654 __setup("netdev=", netdev_boot_setup);
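
/*
 * Illustrative note on the "netdev=" option above: a command line such as
 *
 *      netdev=5,0x300,0,0,eth0
 *
 * is split by get_options() into irq=5, base_addr=0x300, mem_start=0 and
 * mem_end=0, and the trailing string "eth0" becomes the entry name passed
 * to netdev_boot_setup_add().
 */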
655 
656 /*******************************************************************************
657 
658                             Device Interface Subroutines
659 
660 *******************************************************************************/
661 
662 /**
 663  *      dev_get_iflink  - get 'iflink' value of an interface
664  *      @dev: targeted interface
665  *
666  *      Indicates the ifindex the interface is linked to.
667  *      Physical interfaces have the same 'ifindex' and 'iflink' values.
668  */
669 
670 int dev_get_iflink(const struct net_device *dev)
671 {
672         if (dev->netdev_ops && dev->netdev_ops->ndo_get_iflink)
673                 return dev->netdev_ops->ndo_get_iflink(dev);
674 
675         /* If dev->rtnl_link_ops is set, it's a virtual interface. */
676         if (dev->rtnl_link_ops)
677                 return 0;
678 
679         return dev->ifindex;
680 }
681 EXPORT_SYMBOL(dev_get_iflink);
682 
683 /**
684  *      __dev_get_by_name       - find a device by its name
685  *      @net: the applicable net namespace
686  *      @name: name to find
687  *
688  *      Find an interface by name. Must be called under RTNL semaphore
689  *      or @dev_base_lock. If the name is found a pointer to the device
690  *      is returned. If the name is not found then %NULL is returned. The
691  *      reference counters are not incremented so the caller must be
692  *      careful with locks.
693  */
694 
695 struct net_device *__dev_get_by_name(struct net *net, const char *name)
696 {
697         struct net_device *dev;
698         struct hlist_head *head = dev_name_hash(net, name);
699 
700         hlist_for_each_entry(dev, head, name_hlist)
701                 if (!strncmp(dev->name, name, IFNAMSIZ))
702                         return dev;
703 
704         return NULL;
705 }
706 EXPORT_SYMBOL(__dev_get_by_name);
707 
708 /**
709  *      dev_get_by_name_rcu     - find a device by its name
710  *      @net: the applicable net namespace
711  *      @name: name to find
712  *
713  *      Find an interface by name.
714  *      If the name is found a pointer to the device is returned.
715  *      If the name is not found then %NULL is returned.
716  *      The reference counters are not incremented so the caller must be
717  *      careful with locks. The caller must hold RCU lock.
718  */
719 
720 struct net_device *dev_get_by_name_rcu(struct net *net, const char *name)
721 {
722         struct net_device *dev;
723         struct hlist_head *head = dev_name_hash(net, name);
724 
725         hlist_for_each_entry_rcu(dev, head, name_hlist)
726                 if (!strncmp(dev->name, name, IFNAMSIZ))
727                         return dev;
728 
729         return NULL;
730 }
731 EXPORT_SYMBOL(dev_get_by_name_rcu);
732 
733 /**
734  *      dev_get_by_name         - find a device by its name
735  *      @net: the applicable net namespace
736  *      @name: name to find
737  *
738  *      Find an interface by name. This can be called from any
739  *      context and does its own locking. The returned handle has
740  *      the usage count incremented and the caller must use dev_put() to
741  *      release it when it is no longer needed. %NULL is returned if no
742  *      matching device is found.
743  */
744 
745 struct net_device *dev_get_by_name(struct net *net, const char *name)
746 {
747         struct net_device *dev;
748 
749         rcu_read_lock();
750         dev = dev_get_by_name_rcu(net, name);
751         if (dev)
752                 dev_hold(dev);
753         rcu_read_unlock();
754         return dev;
755 }
756 EXPORT_SYMBOL(dev_get_by_name);
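
/*
 * Illustrative sketch of the refcounted lookup, callable from any context.
 * The "eth0" name, the helper name and the pr_info() message are
 * illustrative only.
 */
static void example_name_lookup(struct net *net)
{
        struct net_device *dev = dev_get_by_name(net, "eth0");

        if (!dev)
                return;
        pr_info("%s: ifindex %d\n", dev->name, dev->ifindex);
        dev_put(dev);           /* release the reference taken by the lookup */
}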
757 
758 /**
759  *      __dev_get_by_index - find a device by its ifindex
760  *      @net: the applicable net namespace
761  *      @ifindex: index of device
762  *
 763  *      Search for an interface by index. Returns a pointer to the device,
 764  *      or %NULL if the device is not found. The device has not
765  *      had its reference counter increased so the caller must be careful
766  *      about locking. The caller must hold either the RTNL semaphore
767  *      or @dev_base_lock.
768  */
769 
770 struct net_device *__dev_get_by_index(struct net *net, int ifindex)
771 {
772         struct net_device *dev;
773         struct hlist_head *head = dev_index_hash(net, ifindex);
774 
775         hlist_for_each_entry(dev, head, index_hlist)
776                 if (dev->ifindex == ifindex)
777                         return dev;
778 
779         return NULL;
780 }
781 EXPORT_SYMBOL(__dev_get_by_index);
782 
783 /**
784  *      dev_get_by_index_rcu - find a device by its ifindex
785  *      @net: the applicable net namespace
786  *      @ifindex: index of device
787  *
 788  *      Search for an interface by index. Returns a pointer to the device,
 789  *      or %NULL if the device is not found. The device has not
790  *      had its reference counter increased so the caller must be careful
791  *      about locking. The caller must hold RCU lock.
792  */
793 
794 struct net_device *dev_get_by_index_rcu(struct net *net, int ifindex)
795 {
796         struct net_device *dev;
797         struct hlist_head *head = dev_index_hash(net, ifindex);
798 
799         hlist_for_each_entry_rcu(dev, head, index_hlist)
800                 if (dev->ifindex == ifindex)
801                         return dev;
802 
803         return NULL;
804 }
805 EXPORT_SYMBOL(dev_get_by_index_rcu);
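
/*
 * Illustrative sketch: the RCU variant takes no reference, so the pointer
 * is only valid inside the read-side critical section (unless dev_hold()
 * is called before unlocking).  The helper name is hypothetical.
 */
static bool example_ifindex_is_up(struct net *net, int ifindex)
{
        struct net_device *dev;
        bool up = false;

        rcu_read_lock();
        dev = dev_get_by_index_rcu(net, ifindex);
        if (dev)
                up = !!(dev->flags & IFF_UP);
        rcu_read_unlock();

        return up;
}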
806 
807 
808 /**
809  *      dev_get_by_index - find a device by its ifindex
810  *      @net: the applicable net namespace
811  *      @ifindex: index of device
812  *
 813  *      Search for an interface by index. Returns a pointer to the device,
 814  *      or NULL if the device is not found. The device returned has
815  *      had a reference added and the pointer is safe until the user calls
816  *      dev_put to indicate they have finished with it.
817  */
818 
819 struct net_device *dev_get_by_index(struct net *net, int ifindex)
820 {
821         struct net_device *dev;
822 
823         rcu_read_lock();
824         dev = dev_get_by_index_rcu(net, ifindex);
825         if (dev)
826                 dev_hold(dev);
827         rcu_read_unlock();
828         return dev;
829 }
830 EXPORT_SYMBOL(dev_get_by_index);
831 
832 /**
833  *      netdev_get_name - get a netdevice name, knowing its ifindex.
834  *      @net: network namespace
835  *      @name: a pointer to the buffer where the name will be stored.
836  *      @ifindex: the ifindex of the interface to get the name from.
837  *
838  *      The use of raw_seqcount_begin() and cond_resched() before
839  *      retrying is required as we want to give the writers a chance
840  *      to complete when CONFIG_PREEMPT is not set.
841  */
842 int netdev_get_name(struct net *net, char *name, int ifindex)
843 {
844         struct net_device *dev;
845         unsigned int seq;
846 
847 retry:
848         seq = raw_seqcount_begin(&devnet_rename_seq);
849         rcu_read_lock();
850         dev = dev_get_by_index_rcu(net, ifindex);
851         if (!dev) {
852                 rcu_read_unlock();
853                 return -ENODEV;
854         }
855 
856         strcpy(name, dev->name);
857         rcu_read_unlock();
858         if (read_seqcount_retry(&devnet_rename_seq, seq)) {
859                 cond_resched();
860                 goto retry;
861         }
862 
863         return 0;
864 }
865 
866 /**
867  *      dev_getbyhwaddr_rcu - find a device by its hardware address
868  *      @net: the applicable net namespace
869  *      @type: media type of device
870  *      @ha: hardware address
871  *
 872  *      Search for an interface by MAC address. Returns a pointer to the
 873  *      device, or NULL if the device is not found.
874  *      The caller must hold RCU or RTNL.
875  *      The returned device has not had its ref count increased
876  *      and the caller must therefore be careful about locking
877  *
878  */
879 
880 struct net_device *dev_getbyhwaddr_rcu(struct net *net, unsigned short type,
881                                        const char *ha)
882 {
883         struct net_device *dev;
884 
885         for_each_netdev_rcu(net, dev)
886                 if (dev->type == type &&
887                     !memcmp(dev->dev_addr, ha, dev->addr_len))
888                         return dev;
889 
890         return NULL;
891 }
892 EXPORT_SYMBOL(dev_getbyhwaddr_rcu);
893 
894 struct net_device *__dev_getfirstbyhwtype(struct net *net, unsigned short type)
895 {
896         struct net_device *dev;
897 
898         ASSERT_RTNL();
899         for_each_netdev(net, dev)
900                 if (dev->type == type)
901                         return dev;
902 
903         return NULL;
904 }
905 EXPORT_SYMBOL(__dev_getfirstbyhwtype);
906 
907 struct net_device *dev_getfirstbyhwtype(struct net *net, unsigned short type)
908 {
909         struct net_device *dev, *ret = NULL;
910 
911         rcu_read_lock();
912         for_each_netdev_rcu(net, dev)
913                 if (dev->type == type) {
914                         dev_hold(dev);
915                         ret = dev;
916                         break;
917                 }
918         rcu_read_unlock();
919         return ret;
920 }
921 EXPORT_SYMBOL(dev_getfirstbyhwtype);
922 
923 /**
924  *      __dev_get_by_flags - find any device with given flags
925  *      @net: the applicable net namespace
926  *      @if_flags: IFF_* values
927  *      @mask: bitmask of bits in if_flags to check
928  *
 929  *      Search for any interface with the given flags. Returns a pointer to
 930  *      the first matching device, or NULL if none is found. Must be called
 931  *      inside rtnl_lock(), and the result's refcount is unchanged.
932  */
933 
934 struct net_device *__dev_get_by_flags(struct net *net, unsigned short if_flags,
935                                       unsigned short mask)
936 {
937         struct net_device *dev, *ret;
938 
939         ASSERT_RTNL();
940 
941         ret = NULL;
942         for_each_netdev(net, dev) {
943                 if (((dev->flags ^ if_flags) & mask) == 0) {
944                         ret = dev;
945                         break;
946                 }
947         }
948         return ret;
949 }
950 EXPORT_SYMBOL(__dev_get_by_flags);
951 
952 /**
953  *      dev_valid_name - check if name is okay for network device
954  *      @name: name string
955  *
956  *      Network device names need to be valid file names to
 957  *      allow sysfs to work.  We also disallow any kind of
958  *      whitespace.
959  */
960 bool dev_valid_name(const char *name)
961 {
962         if (*name == '\0')
963                 return false;
964         if (strlen(name) >= IFNAMSIZ)
965                 return false;
966         if (!strcmp(name, ".") || !strcmp(name, ".."))
967                 return false;
968 
969         while (*name) {
970                 if (*name == '/' || *name == ':' || isspace(*name))
971                         return false;
972                 name++;
973         }
974         return true;
975 }
976 EXPORT_SYMBOL(dev_valid_name);
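
/*
 * Illustrative examples of the rules above: "eth0" and "wlan-guest" are
 * accepted, while "", ".", "..", any name of IFNAMSIZ or more characters,
 * and any name containing '/', ':' or whitespace are rejected.
 */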
977 
978 /**
979  *      __dev_alloc_name - allocate a name for a device
980  *      @net: network namespace to allocate the device name in
981  *      @name: name format string
982  *      @buf:  scratch buffer and result name string
983  *
 984  *      Passed a format string - eg "lt%d" - it will try to find a suitable
 985  *      id. It scans the list of devices to build up a free map, then chooses
986  *      the first empty slot. The caller must hold the dev_base or rtnl lock
987  *      while allocating the name and adding the device in order to avoid
988  *      duplicates.
989  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
990  *      Returns the number of the unit assigned or a negative errno code.
991  */
992 
993 static int __dev_alloc_name(struct net *net, const char *name, char *buf)
994 {
995         int i = 0;
996         const char *p;
997         const int max_netdevices = 8*PAGE_SIZE;
998         unsigned long *inuse;
999         struct net_device *d;
1000 
1001         p = strnchr(name, IFNAMSIZ-1, '%');
1002         if (p) {
1003                 /*
1004                  * Verify the string as this thing may have come from
1005                  * the user.  There must be either one "%d" and no other "%"
1006                  * characters.
1007                  */
1008                 if (p[1] != 'd' || strchr(p + 2, '%'))
1009                         return -EINVAL;
1010 
1011                 /* Use one page as a bit array of possible slots */
1012                 inuse = (unsigned long *) get_zeroed_page(GFP_ATOMIC);
1013                 if (!inuse)
1014                         return -ENOMEM;
1015 
1016                 for_each_netdev(net, d) {
1017                         if (!sscanf(d->name, name, &i))
1018                                 continue;
1019                         if (i < 0 || i >= max_netdevices)
1020                                 continue;
1021 
1022                         /*  avoid cases where sscanf is not exact inverse of printf */
1023                         snprintf(buf, IFNAMSIZ, name, i);
1024                         if (!strncmp(buf, d->name, IFNAMSIZ))
1025                                 set_bit(i, inuse);
1026                 }
1027 
1028                 i = find_first_zero_bit(inuse, max_netdevices);
1029                 free_page((unsigned long) inuse);
1030         }
1031 
1032         if (buf != name)
1033                 snprintf(buf, IFNAMSIZ, name, i);
1034         if (!__dev_get_by_name(net, buf))
1035                 return i;
1036 
1037         /* It is possible to run out of possible slots
1038          * when the name is long and there isn't enough space left
1039          * for the digits, or if all bits are used.
1040          */
1041         return -ENFILE;
1042 }
1043 
1044 /**
1045  *      dev_alloc_name - allocate a name for a device
1046  *      @dev: device
1047  *      @name: name format string
1048  *
1049  *      Passed a format string - eg "lt%d" - it will try to find a suitable
1050  *      id. It scans the list of devices to build up a free map, then chooses
1051  *      the first empty slot. The caller must hold the dev_base or rtnl lock
1052  *      while allocating the name and adding the device in order to avoid
1053  *      duplicates.
1054  *      Limited to bits_per_byte * page size devices (ie 32K on most platforms).
1055  *      Returns the number of the unit assigned or a negative errno code.
1056  */
1057 
1058 int dev_alloc_name(struct net_device *dev, const char *name)
1059 {
1060         char buf[IFNAMSIZ];
1061         struct net *net;
1062         int ret;
1063 
1064         BUG_ON(!dev_net(dev));
1065         net = dev_net(dev);
1066         ret = __dev_alloc_name(net, name, buf);
1067         if (ret >= 0)
1068                 strlcpy(dev->name, buf, IFNAMSIZ);
1069         return ret;
1070 }
1071 EXPORT_SYMBOL(dev_alloc_name);
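
/*
 * Illustrative sketch of dev_alloc_name(): a "%d" pattern is expanded to
 * the first free unit.  The "veth%d" pattern and the helper name are
 * examples; the caller is assumed to hold the rtnl lock as documented.
 */
static int example_pick_name(struct net_device *dev)
{
        int unit = dev_alloc_name(dev, "veth%d");

        if (unit < 0)
                return unit;    /* -EINVAL, -ENOMEM or -ENFILE */
        /* dev->name now holds e.g. "veth0"; unit is the number chosen. */
        return 0;
}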
1072 
1073 static int dev_alloc_name_ns(struct net *net,
1074                              struct net_device *dev,
1075                              const char *name)
1076 {
1077         char buf[IFNAMSIZ];
1078         int ret;
1079 
1080         ret = __dev_alloc_name(net, name, buf);
1081         if (ret >= 0)
1082                 strlcpy(dev->name, buf, IFNAMSIZ);
1083         return ret;
1084 }
1085 
1086 static int dev_get_valid_name(struct net *net,
1087                               struct net_device *dev,
1088                               const char *name)
1089 {
1090         BUG_ON(!net);
1091 
1092         if (!dev_valid_name(name))
1093                 return -EINVAL;
1094 
1095         if (strchr(name, '%'))
1096                 return dev_alloc_name_ns(net, dev, name);
1097         else if (__dev_get_by_name(net, name))
1098                 return -EEXIST;
1099         else if (dev->name != name)
1100                 strlcpy(dev->name, name, IFNAMSIZ);
1101 
1102         return 0;
1103 }
1104 
1105 /**
1106  *      dev_change_name - change name of a device
1107  *      @dev: device
1108  *      @newname: name (or format string) must be at least IFNAMSIZ
1109  *
1110  *      Change the name of a device. A format string such as "eth%d"
1111  *      can be passed for wildcarding.
1112  */
1113 int dev_change_name(struct net_device *dev, const char *newname)
1114 {
1115         unsigned char old_assign_type;
1116         char oldname[IFNAMSIZ];
1117         int err = 0;
1118         int ret;
1119         struct net *net;
1120 
1121         ASSERT_RTNL();
1122         BUG_ON(!dev_net(dev));
1123 
1124         net = dev_net(dev);
1125         if (dev->flags & IFF_UP)
1126                 return -EBUSY;
1127 
1128         write_seqcount_begin(&devnet_rename_seq);
1129 
1130         if (strncmp(newname, dev->name, IFNAMSIZ) == 0) {
1131                 write_seqcount_end(&devnet_rename_seq);
1132                 return 0;
1133         }
1134 
1135         memcpy(oldname, dev->name, IFNAMSIZ);
1136 
1137         err = dev_get_valid_name(net, dev, newname);
1138         if (err < 0) {
1139                 write_seqcount_end(&devnet_rename_seq);
1140                 return err;
1141         }
1142 
1143         if (oldname[0] && !strchr(oldname, '%'))
1144                 netdev_info(dev, "renamed from %s\n", oldname);
1145 
1146         old_assign_type = dev->name_assign_type;
1147         dev->name_assign_type = NET_NAME_RENAMED;
1148 
1149 rollback:
1150         ret = device_rename(&dev->dev, dev->name);
1151         if (ret) {
1152                 memcpy(dev->name, oldname, IFNAMSIZ);
1153                 dev->name_assign_type = old_assign_type;
1154                 write_seqcount_end(&devnet_rename_seq);
1155                 return ret;
1156         }
1157 
1158         write_seqcount_end(&devnet_rename_seq);
1159 
1160         netdev_adjacent_rename_links(dev, oldname);
1161 
1162         write_lock_bh(&dev_base_lock);
1163         hlist_del_rcu(&dev->name_hlist);
1164         write_unlock_bh(&dev_base_lock);
1165 
1166         synchronize_rcu();
1167 
1168         write_lock_bh(&dev_base_lock);
1169         hlist_add_head_rcu(&dev->name_hlist, dev_name_hash(net, dev->name));
1170         write_unlock_bh(&dev_base_lock);
1171 
1172         ret = call_netdevice_notifiers(NETDEV_CHANGENAME, dev);
1173         ret = notifier_to_errno(ret);
1174 
1175         if (ret) {
1176                 /* err >= 0 after dev_alloc_name() or stores the first errno */
1177                 if (err >= 0) {
1178                         err = ret;
1179                         write_seqcount_begin(&devnet_rename_seq);
1180                         memcpy(dev->name, oldname, IFNAMSIZ);
1181                         memcpy(oldname, newname, IFNAMSIZ);
1182                         dev->name_assign_type = old_assign_type;
1183                         old_assign_type = NET_NAME_RENAMED;
1184                         goto rollback;
1185                 } else {
1186                         pr_err("%s: name change rollback failed: %d\n",
1187                                dev->name, ret);
1188                 }
1189         }
1190 
1191         return err;
1192 }
1193 
1194 /**
1195  *      dev_set_alias - change ifalias of a device
1196  *      @dev: device
1197  *      @alias: name up to IFALIASZ
1198  *      @len: limit of bytes to copy from info
1199  *
1200  *      Set the ifalias for a device.
1201  */
1202 int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
1203 {
1204         char *new_ifalias;
1205 
1206         ASSERT_RTNL();
1207 
1208         if (len >= IFALIASZ)
1209                 return -EINVAL;
1210 
1211         if (!len) {
1212                 kfree(dev->ifalias);
1213                 dev->ifalias = NULL;
1214                 return 0;
1215         }
1216 
1217         new_ifalias = krealloc(dev->ifalias, len + 1, GFP_KERNEL);
1218         if (!new_ifalias)
1219                 return -ENOMEM;
1220         dev->ifalias = new_ifalias;
1221 
1222         strlcpy(dev->ifalias, alias, len+1);
1223         return len;
1224 }
1225 
1226 
1227 /**
1228  *      netdev_features_change - device changes features
1229  *      @dev: device to cause notification
1230  *
1231  *      Called to indicate a device has changed features.
1232  */
1233 void netdev_features_change(struct net_device *dev)
1234 {
1235         call_netdevice_notifiers(NETDEV_FEAT_CHANGE, dev);
1236 }
1237 EXPORT_SYMBOL(netdev_features_change);
1238 
1239 /**
1240  *      netdev_state_change - device changes state
1241  *      @dev: device to cause notification
1242  *
1243  *      Called to indicate a device has changed state. This function calls
1244  *      the notifier chains for netdev_chain and sends a NEWLINK message
1245  *      to the routing socket.
1246  */
1247 void netdev_state_change(struct net_device *dev)
1248 {
1249         if (dev->flags & IFF_UP) {
1250                 struct netdev_notifier_change_info change_info;
1251 
1252                 change_info.flags_changed = 0;
1253                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
1254                                               &change_info.info);
1255                 rtmsg_ifinfo(RTM_NEWLINK, dev, 0, GFP_KERNEL);
1256         }
1257 }
1258 EXPORT_SYMBOL(netdev_state_change);
1259 
1260 /**
1261  *      netdev_notify_peers - notify network peers about existence of @dev
1262  *      @dev: network device
1263  *
1264  * Generate traffic such that interested network peers are aware of
1265  * @dev, such as by generating a gratuitous ARP. This may be used when
1266  * a device wants to inform the rest of the network about some sort of
1267  * reconfiguration such as a failover event or virtual machine
1268  * migration.
1269  */
1270 void netdev_notify_peers(struct net_device *dev)
1271 {
1272         rtnl_lock();
1273         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, dev);
1274         rtnl_unlock();
1275 }
1276 EXPORT_SYMBOL(netdev_notify_peers);
1277 
1278 static int __dev_open(struct net_device *dev)
1279 {
1280         const struct net_device_ops *ops = dev->netdev_ops;
1281         int ret;
1282 
1283         ASSERT_RTNL();
1284 
1285         if (!netif_device_present(dev))
1286                 return -ENODEV;
1287 
1288         /* Block netpoll from trying to do any rx path servicing.
1289          * If we don't do this, there is a chance that ndo_poll_controller
1290          * or ndo_poll may be running while we open the device.
1291          */
1292         netpoll_poll_disable(dev);
1293 
1294         ret = call_netdevice_notifiers(NETDEV_PRE_UP, dev);
1295         ret = notifier_to_errno(ret);
1296         if (ret)
1297                 return ret;
1298 
1299         set_bit(__LINK_STATE_START, &dev->state);
1300 
1301         if (ops->ndo_validate_addr)
1302                 ret = ops->ndo_validate_addr(dev);
1303 
1304         if (!ret && ops->ndo_open)
1305                 ret = ops->ndo_open(dev);
1306 
1307         netpoll_poll_enable(dev);
1308 
1309         if (ret)
1310                 clear_bit(__LINK_STATE_START, &dev->state);
1311         else {
1312                 dev->flags |= IFF_UP;
1313                 dev_set_rx_mode(dev);
1314                 dev_activate(dev);
1315                 add_device_randomness(dev->dev_addr, dev->addr_len);
1316         }
1317 
1318         return ret;
1319 }
1320 
1321 /**
1322  *      dev_open        - prepare an interface for use.
1323  *      @dev:   device to open
1324  *
1325  *      Takes a device from down to up state. The device's private open
1326  *      function is invoked and then the multicast lists are loaded. Finally
1327  *      the device is moved into the up state and a %NETDEV_UP message is
1328  *      sent to the netdev notifier chain.
1329  *
1330  *      Calling this function on an active interface is a nop. On a failure
1331  *      a negative errno code is returned.
1332  */
1333 int dev_open(struct net_device *dev)
1334 {
1335         int ret;
1336 
1337         if (dev->flags & IFF_UP)
1338                 return 0;
1339 
1340         ret = __dev_open(dev);
1341         if (ret < 0)
1342                 return ret;
1343 
1344         rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1345         call_netdevice_notifiers(NETDEV_UP, dev);
1346 
1347         return ret;
1348 }
1349 EXPORT_SYMBOL(dev_open);
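
/*
 * Illustrative sketch: dev_open() and dev_close() expect the caller to hold
 * the rtnl lock, which the ASSERT_RTNL() in __dev_open() and
 * __dev_close_many() enforces.  The helper name is hypothetical.
 */
static int example_bring_up(struct net_device *dev)
{
        int err;

        rtnl_lock();
        err = dev_open(dev);    /* no-op (returns 0) if already up */
        rtnl_unlock();

        return err;
}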
1350 
1351 static int __dev_close_many(struct list_head *head)
1352 {
1353         struct net_device *dev;
1354 
1355         ASSERT_RTNL();
1356         might_sleep();
1357 
1358         list_for_each_entry(dev, head, close_list) {
1359                 /* Temporarily disable netpoll until the interface is down */
1360                 netpoll_poll_disable(dev);
1361 
1362                 call_netdevice_notifiers(NETDEV_GOING_DOWN, dev);
1363 
1364                 clear_bit(__LINK_STATE_START, &dev->state);
1365 
1366                 /* Synchronize to the scheduled poll. We cannot touch the poll list;
1367                  * it may even be on a different CPU. So just clear netif_running().
1368                  *
1369                  * dev->stop() will invoke napi_disable() on all of its
1370                  * napi_struct instances on this device.
1371                  */
1372                 smp_mb__after_atomic(); /* Commit netif_running(). */
1373         }
1374 
1375         dev_deactivate_many(head);
1376 
1377         list_for_each_entry(dev, head, close_list) {
1378                 const struct net_device_ops *ops = dev->netdev_ops;
1379 
1380                 /*
1381                  *      Call the device-specific close. This cannot fail and
1382                  *      is only done if the device is UP.
1383                  *
1384                  *      We allow it to be called even after a DETACH hot-plug
1385                  *      event.
1386                  */
1387                 if (ops->ndo_stop)
1388                         ops->ndo_stop(dev);
1389 
1390                 dev->flags &= ~IFF_UP;
1391                 netpoll_poll_enable(dev);
1392         }
1393 
1394         return 0;
1395 }
1396 
1397 static int __dev_close(struct net_device *dev)
1398 {
1399         int retval;
1400         LIST_HEAD(single);
1401 
1402         list_add(&dev->close_list, &single);
1403         retval = __dev_close_many(&single);
1404         list_del(&single);
1405 
1406         return retval;
1407 }
1408 
1409 int dev_close_many(struct list_head *head, bool unlink)
1410 {
1411         struct net_device *dev, *tmp;
1412 
1413         /* Remove the devices that don't need to be closed */
1414         list_for_each_entry_safe(dev, tmp, head, close_list)
1415                 if (!(dev->flags & IFF_UP))
1416                         list_del_init(&dev->close_list);
1417 
1418         __dev_close_many(head);
1419 
1420         list_for_each_entry_safe(dev, tmp, head, close_list) {
1421                 rtmsg_ifinfo(RTM_NEWLINK, dev, IFF_UP|IFF_RUNNING, GFP_KERNEL);
1422                 call_netdevice_notifiers(NETDEV_DOWN, dev);
1423                 if (unlink)
1424                         list_del_init(&dev->close_list);
1425         }
1426 
1427         return 0;
1428 }
1429 EXPORT_SYMBOL(dev_close_many);
1430 
1431 /**
1432  *      dev_close - shutdown an interface.
1433  *      @dev: device to shutdown
1434  *
1435  *      This function moves an active device into down state. A
1436  *      %NETDEV_GOING_DOWN is sent to the netdev notifier chain. The device
1437  *      is then deactivated and finally a %NETDEV_DOWN is sent to the notifier
1438  *      chain.
1439  */
1440 int dev_close(struct net_device *dev)
1441 {
1442         if (dev->flags & IFF_UP) {
1443                 LIST_HEAD(single);
1444 
1445                 list_add(&dev->close_list, &single);
1446                 dev_close_many(&single, true);
1447                 list_del(&single);
1448         }
1449         return 0;
1450 }
1451 EXPORT_SYMBOL(dev_close);
1452 
1453 
1454 /**
1455  *      dev_disable_lro - disable Large Receive Offload on a device
1456  *      @dev: device
1457  *
1458  *      Disable Large Receive Offload (LRO) on a net device.  Must be
1459  *      called under RTNL.  This is needed if received packets may be
1460  *      forwarded to another interface.
1461  */
1462 void dev_disable_lro(struct net_device *dev)
1463 {
1464         struct net_device *lower_dev;
1465         struct list_head *iter;
1466 
1467         dev->wanted_features &= ~NETIF_F_LRO;
1468         netdev_update_features(dev);
1469 
1470         if (unlikely(dev->features & NETIF_F_LRO))
1471                 netdev_WARN(dev, "failed to disable LRO!\n");
1472 
1473         netdev_for_each_lower_dev(dev, lower_dev, iter)
1474                 dev_disable_lro(lower_dev);
1475 }
1476 EXPORT_SYMBOL(dev_disable_lro);
1477 
1478 static int call_netdevice_notifier(struct notifier_block *nb, unsigned long val,
1479                                    struct net_device *dev)
1480 {
1481         struct netdev_notifier_info info;
1482 
1483         netdev_notifier_info_init(&info, dev);
1484         return nb->notifier_call(nb, val, &info);
1485 }
1486 
1487 static int dev_boot_phase = 1;
1488 
1489 /**
1490  *      register_netdevice_notifier - register a network notifier block
1491  *      @nb: notifier
1492  *
1493  *      Register a notifier to be called when network device events occur.
1494  *      The notifier passed is linked into the kernel structures and must
1495  *      not be reused until it has been unregistered. A negative errno code
1496  *      is returned on a failure.
1497  *
1498  *      When registered, all registration and up events are replayed
1499  *      to the new notifier to give it a race-free
1500  *      view of the network device list.
1501  */
1502 
1503 int register_netdevice_notifier(struct notifier_block *nb)
1504 {
1505         struct net_device *dev;
1506         struct net_device *last;
1507         struct net *net;
1508         int err;
1509 
1510         rtnl_lock();
1511         err = raw_notifier_chain_register(&netdev_chain, nb);
1512         if (err)
1513                 goto unlock;
1514         if (dev_boot_phase)
1515                 goto unlock;
1516         for_each_net(net) {
1517                 for_each_netdev(net, dev) {
1518                         err = call_netdevice_notifier(nb, NETDEV_REGISTER, dev);
1519                         err = notifier_to_errno(err);
1520                         if (err)
1521                                 goto rollback;
1522 
1523                         if (!(dev->flags & IFF_UP))
1524                                 continue;
1525 
1526                         call_netdevice_notifier(nb, NETDEV_UP, dev);
1527                 }
1528         }
1529 
1530 unlock:
1531         rtnl_unlock();
1532         return err;
1533 
1534 rollback:
1535         last = dev;
1536         for_each_net(net) {
1537                 for_each_netdev(net, dev) {
1538                         if (dev == last)
1539                                 goto outroll;
1540 
1541                         if (dev->flags & IFF_UP) {
1542                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1543                                                         dev);
1544                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1545                         }
1546                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1547                 }
1548         }
1549 
1550 outroll:
1551         raw_notifier_chain_unregister(&netdev_chain, nb);
1552         goto unlock;
1553 }
1554 EXPORT_SYMBOL(register_netdevice_notifier);
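/*
 * Editorial example (not part of dev.c): a hypothetical module that wants
 * race-free notification of device events would typically register a
 * notifier block like the following; the handler and block names are
 * illustrative only.
 *
 *	static int example_netdev_event(struct notifier_block *nb,
 *					unsigned long event, void *ptr)
 *	{
 *		struct net_device *dev = netdev_notifier_info_to_dev(ptr);
 *
 *		switch (event) {
 *		case NETDEV_REGISTER:
 *		case NETDEV_UP:
 *			pr_info("%s: event %lu\n", dev->name, event);
 *			break;
 *		}
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block example_nb = {
 *		.notifier_call = example_netdev_event,
 *	};
 *
 *	register_netdevice_notifier(&example_nb);	// module init
 *	unregister_netdevice_notifier(&example_nb);	// module exit
 *
 * Because registration replays NETDEV_REGISTER/NETDEV_UP for existing
 * devices, the handler above also sees interfaces that were already up.
 */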
1555 
1556 /**
1557  *      unregister_netdevice_notifier - unregister a network notifier block
1558  *      @nb: notifier
1559  *
1560  *      Unregister a notifier previously registered by
1561  *      register_netdevice_notifier(). The notifier is unlinked from the
1562  *      kernel structures and may then be reused. A negative errno code
1563  *      is returned on a failure.
1564  *
1565  *      After unregistering, unregister and down device events are
1566  *      synthesized for all devices on the device list and sent to the
1567  *      removed notifier, removing the need for special-case cleanup code.
1568  */
1569 
1570 int unregister_netdevice_notifier(struct notifier_block *nb)
1571 {
1572         struct net_device *dev;
1573         struct net *net;
1574         int err;
1575 
1576         rtnl_lock();
1577         err = raw_notifier_chain_unregister(&netdev_chain, nb);
1578         if (err)
1579                 goto unlock;
1580 
1581         for_each_net(net) {
1582                 for_each_netdev(net, dev) {
1583                         if (dev->flags & IFF_UP) {
1584                                 call_netdevice_notifier(nb, NETDEV_GOING_DOWN,
1585                                                         dev);
1586                                 call_netdevice_notifier(nb, NETDEV_DOWN, dev);
1587                         }
1588                         call_netdevice_notifier(nb, NETDEV_UNREGISTER, dev);
1589                 }
1590         }
1591 unlock:
1592         rtnl_unlock();
1593         return err;
1594 }
1595 EXPORT_SYMBOL(unregister_netdevice_notifier);
1596 
1597 /**
1598  *      call_netdevice_notifiers_info - call all network notifier blocks
1599  *      @val: value passed unmodified to notifier function
1600  *      @dev: net_device pointer passed unmodified to notifier function
1601  *      @info: notifier information data
1602  *
1603  *      Call all network notifier blocks.  Parameters and return value
1604  *      are as for raw_notifier_call_chain().
1605  */
1606 
1607 static int call_netdevice_notifiers_info(unsigned long val,
1608                                          struct net_device *dev,
1609                                          struct netdev_notifier_info *info)
1610 {
1611         ASSERT_RTNL();
1612         netdev_notifier_info_init(info, dev);
1613         return raw_notifier_call_chain(&netdev_chain, val, info);
1614 }
1615 
1616 /**
1617  *      call_netdevice_notifiers - call all network notifier blocks
1618  *      @val: value passed unmodified to notifier function
1619  *      @dev: net_device pointer passed unmodified to notifier function
1620  *
1621  *      Call all network notifier blocks.  Parameters and return value
1622  *      are as for raw_notifier_call_chain().
1623  */
1624 
1625 int call_netdevice_notifiers(unsigned long val, struct net_device *dev)
1626 {
1627         struct netdev_notifier_info info;
1628 
1629         return call_netdevice_notifiers_info(val, dev, &info);
1630 }
1631 EXPORT_SYMBOL(call_netdevice_notifiers);
1632 
1633 #ifdef CONFIG_NET_CLS_ACT
1634 static struct static_key ingress_needed __read_mostly;
1635 
1636 void net_inc_ingress_queue(void)
1637 {
1638         static_key_slow_inc(&ingress_needed);
1639 }
1640 EXPORT_SYMBOL_GPL(net_inc_ingress_queue);
1641 
1642 void net_dec_ingress_queue(void)
1643 {
1644         static_key_slow_dec(&ingress_needed);
1645 }
1646 EXPORT_SYMBOL_GPL(net_dec_ingress_queue);
1647 #endif
1648 
1649 static struct static_key netstamp_needed __read_mostly;
1650 #ifdef HAVE_JUMP_LABEL
1651 /* We are not allowed to call static_key_slow_dec() from irq context.
1652  * If net_disable_timestamp() is called from irq context, defer the
1653  * static_key_slow_dec() calls.
1654  */
1655 static atomic_t netstamp_needed_deferred;
1656 #endif
1657 
1658 void net_enable_timestamp(void)
1659 {
1660 #ifdef HAVE_JUMP_LABEL
1661         int deferred = atomic_xchg(&netstamp_needed_deferred, 0);
1662 
1663         if (deferred) {
1664                 while (--deferred)
1665                         static_key_slow_dec(&netstamp_needed);
1666                 return;
1667         }
1668 #endif
1669         static_key_slow_inc(&netstamp_needed);
1670 }
1671 EXPORT_SYMBOL(net_enable_timestamp);
1672 
1673 void net_disable_timestamp(void)
1674 {
1675 #ifdef HAVE_JUMP_LABEL
1676         if (in_interrupt()) {
1677                 atomic_inc(&netstamp_needed_deferred);
1678                 return;
1679         }
1680 #endif
1681         static_key_slow_dec(&netstamp_needed);
1682 }
1683 EXPORT_SYMBOL(net_disable_timestamp);
1684 
1685 static inline void net_timestamp_set(struct sk_buff *skb)
1686 {
1687         skb->tstamp.tv64 = 0;
1688         if (static_key_false(&netstamp_needed))
1689                 __net_timestamp(skb);
1690 }
1691 
1692 #define net_timestamp_check(COND, SKB)                  \
1693         if (static_key_false(&netstamp_needed)) {               \
1694                 if ((COND) && !(SKB)->tstamp.tv64)      \
1695                         __net_timestamp(SKB);           \
1696         }                                               \
1697 
1698 bool is_skb_forwardable(struct net_device *dev, struct sk_buff *skb)
1699 {
1700         unsigned int len;
1701 
1702         if (!(dev->flags & IFF_UP))
1703                 return false;
1704 
1705         len = dev->mtu + dev->hard_header_len + VLAN_HLEN;
1706         if (skb->len <= len)
1707                 return true;
1708 
1709         /* if TSO is enabled, we don't care about the length as the packet
1710          * could be forwarded without being segmented first
1711          */
1712         if (skb_is_gso(skb))
1713                 return true;
1714 
1715         return false;
1716 }
1717 EXPORT_SYMBOL_GPL(is_skb_forwardable);
1718 
1719 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1720 {
1721         if (skb_orphan_frags(skb, GFP_ATOMIC) ||
1722             unlikely(!is_skb_forwardable(dev, skb))) {
1723                 atomic_long_inc(&dev->rx_dropped);
1724                 kfree_skb(skb);
1725                 return NET_RX_DROP;
1726         }
1727 
1728         skb_scrub_packet(skb, true);
1729         skb->priority = 0;
1730         skb->protocol = eth_type_trans(skb, dev);
1731         skb_postpull_rcsum(skb, eth_hdr(skb), ETH_HLEN);
1732 
1733         return 0;
1734 }
1735 EXPORT_SYMBOL_GPL(__dev_forward_skb);
1736 
1737 /**
1738  * dev_forward_skb - loopback an skb to another netif
1739  *
1740  * @dev: destination network device
1741  * @skb: buffer to forward
1742  *
1743  * return values:
1744  *      NET_RX_SUCCESS  (no congestion)
1745  *      NET_RX_DROP     (packet was dropped, but freed)
1746  *
1747  * dev_forward_skb can be used for injecting an skb from the
1748  * start_xmit function of one device into the receive queue
1749  * of another device.
1750  *
1751  * The receiving device may be in another namespace, so
1752  * we have to clear all information in the skb that could
1753  * impact namespace isolation.
1754  */
1755 int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
1756 {
1757         return __dev_forward_skb(dev, skb) ?: netif_rx_internal(skb);
1758 }
1759 EXPORT_SYMBOL_GPL(dev_forward_skb);
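/*
 * Editorial example (not part of dev.c): a veth-style driver can hand a
 * frame from its ndo_start_xmit() directly to the peer device's receive
 * path.  Hypothetical sketch; example_get_peer() stands in for whatever
 * driver-specific peer lookup is used.
 *
 *	static netdev_tx_t example_xmit(struct sk_buff *skb,
 *					struct net_device *dev)
 *	{
 *		struct net_device *peer = example_get_peer(dev); // hypothetical
 *
 *		if (dev_forward_skb(peer, skb) != NET_RX_SUCCESS)
 *			dev->stats.tx_dropped++;
 *		return NETDEV_TX_OK;
 *	}
 *
 * Note that dev_forward_skb() consumes the skb in all cases, so the caller
 * must not touch it afterwards.
 */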
1760 
1761 static inline int deliver_skb(struct sk_buff *skb,
1762                               struct packet_type *pt_prev,
1763                               struct net_device *orig_dev)
1764 {
1765         if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
1766                 return -ENOMEM;
1767         atomic_inc(&skb->users);
1768         return pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
1769 }
1770 
1771 static inline void deliver_ptype_list_skb(struct sk_buff *skb,
1772                                           struct packet_type **pt,
1773                                           struct net_device *orig_dev,
1774                                           __be16 type,
1775                                           struct list_head *ptype_list)
1776 {
1777         struct packet_type *ptype, *pt_prev = *pt;
1778 
1779         list_for_each_entry_rcu(ptype, ptype_list, list) {
1780                 if (ptype->type != type)
1781                         continue;
1782                 if (pt_prev)
1783                         deliver_skb(skb, pt_prev, orig_dev);
1784                 pt_prev = ptype;
1785         }
1786         *pt = pt_prev;
1787 }
1788 
1789 static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
1790 {
1791         if (!ptype->af_packet_priv || !skb->sk)
1792                 return false;
1793 
1794         if (ptype->id_match)
1795                 return ptype->id_match(ptype, skb->sk);
1796         else if ((struct sock *)ptype->af_packet_priv == skb->sk)
1797                 return true;
1798 
1799         return false;
1800 }
1801 
1802 /*
1803  *      Support routine. Sends outgoing frames to any network
1804  *      taps currently in use.
1805  */
1806 
1807 static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
1808 {
1809         struct packet_type *ptype;
1810         struct sk_buff *skb2 = NULL;
1811         struct packet_type *pt_prev = NULL;
1812         struct list_head *ptype_list = &ptype_all;
1813 
1814         rcu_read_lock();
1815 again:
1816         list_for_each_entry_rcu(ptype, ptype_list, list) {
1817                 /* Never send packets back to the socket
1818                  * they originated from - MvS (miquels@drinkel.ow.org)
1819                  */
1820                 if (skb_loop_sk(ptype, skb))
1821                         continue;
1822 
1823                 if (pt_prev) {
1824                         deliver_skb(skb2, pt_prev, skb->dev);
1825                         pt_prev = ptype;
1826                         continue;
1827                 }
1828 
1829                 /* need to clone skb, done only once */
1830                 skb2 = skb_clone(skb, GFP_ATOMIC);
1831                 if (!skb2)
1832                         goto out_unlock;
1833 
1834                 net_timestamp_set(skb2);
1835 
1836                 /* The network header should already be set correctly
1837                  * by the sender, so the check below is just protection
1838                  * against buggy protocols.
1839                  */
1840                 skb_reset_mac_header(skb2);
1841 
1842                 if (skb_network_header(skb2) < skb2->data ||
1843                     skb_network_header(skb2) > skb_tail_pointer(skb2)) {
1844                         net_crit_ratelimited("protocol %04x is buggy, dev %s\n",
1845                                              ntohs(skb2->protocol),
1846                                              dev->name);
1847                         skb_reset_network_header(skb2);
1848                 }
1849 
1850                 skb2->transport_header = skb2->network_header;
1851                 skb2->pkt_type = PACKET_OUTGOING;
1852                 pt_prev = ptype;
1853         }
1854 
1855         if (ptype_list == &ptype_all) {
1856                 ptype_list = &dev->ptype_all;
1857                 goto again;
1858         }
1859 out_unlock:
1860         if (pt_prev)
1861                 pt_prev->func(skb2, skb->dev, pt_prev, skb->dev);
1862         rcu_read_unlock();
1863 }
1864 
1865 /**
1866  * netif_setup_tc - Handle tc mappings on real_num_tx_queues change
1867  * @dev: Network device
1868  * @txq: number of queues available
1869  *
1870  * If real_num_tx_queues is changed the tc mappings may no longer be
1871  * valid. To resolve this, verify that each tc mapping remains valid and,
1872  * if not, reset the mapping to zero (TC0). With no priorities mapping to
1873  * an offset/count pair, that pair will no longer be used. In the worst
1874  * case, if TC0 itself is invalid, nothing can be done, so priority
1875  * mappings are disabled entirely. It is expected that drivers will fix
1876  * this mapping if they can before calling netif_set_real_num_tx_queues.
1877  */
1878 static void netif_setup_tc(struct net_device *dev, unsigned int txq)
1879 {
1880         int i;
1881         struct netdev_tc_txq *tc = &dev->tc_to_txq[0];
1882 
1883         /* If TC0 is invalidated disable TC mapping */
1884         if (tc->offset + tc->count > txq) {
1885                 pr_warn("Number of in use tx queues changed invalidating tc mappings. Priority traffic classification disabled!\n");
1886                 dev->num_tc = 0;
1887                 return;
1888         }
1889 
1890         /* Reset invalidated prio-to-tc mappings to TC0 */
1891         for (i = 1; i < TC_BITMASK + 1; i++) {
1892                 int q = netdev_get_prio_tc_map(dev, i);
1893 
1894                 tc = &dev->tc_to_txq[q];
1895                 if (tc->offset + tc->count > txq) {
1896                         pr_warn("Number of in use tx queues changed. Priority %i to tc mapping %i is no longer valid. Setting map to 0\n",
1897                                 i, q);
1898                         netdev_set_prio_tc_map(dev, i, 0);
1899                 }
1900         }
1901 }
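/*
 * Editorial example (not part of dev.c): the tc-to-txq mappings checked
 * above are normally set up by a multiqueue driver, e.g. (hypothetical
 * values, 2 traffic classes over 8 transmit queues):
 *
 *	netdev_set_num_tc(dev, 2);
 *	netdev_set_tc_queue(dev, 0, 4, 0);	// TC0: queues 0-3
 *	netdev_set_tc_queue(dev, 1, 4, 4);	// TC1: queues 4-7
 *	netdev_set_prio_tc_map(dev, 0, 0);	// priority 0 -> TC0
 *	netdev_set_prio_tc_map(dev, 5, 1);	// priority 5 -> TC1
 *
 * If the driver later shrinks real_num_tx_queues below 8, netif_setup_tc()
 * above is what detects and repairs (or disables) these mappings.
 */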
1902 
1903 #ifdef CONFIG_XPS
1904 static DEFINE_MUTEX(xps_map_mutex);
1905 #define xmap_dereference(P)             \
1906         rcu_dereference_protected((P), lockdep_is_held(&xps_map_mutex))
1907 
1908 static struct xps_map *remove_xps_queue(struct xps_dev_maps *dev_maps,
1909                                         int cpu, u16 index)
1910 {
1911         struct xps_map *map = NULL;
1912         int pos;
1913 
1914         if (dev_maps)
1915                 map = xmap_dereference(dev_maps->cpu_map[cpu]);
1916 
1917         for (pos = 0; map && pos < map->len; pos++) {
1918                 if (map->queues[pos] == index) {
1919                         if (map->len > 1) {
1920                                 map->queues[pos] = map->queues[--map->len];
1921                         } else {
1922                                 RCU_INIT_POINTER(dev_maps->cpu_map[cpu], NULL);
1923                                 kfree_rcu(map, rcu);
1924                                 map = NULL;
1925                         }
1926                         break;
1927                 }
1928         }
1929 
1930         return map;
1931 }
1932 
1933 static void netif_reset_xps_queues_gt(struct net_device *dev, u16 index)
1934 {
1935         struct xps_dev_maps *dev_maps;
1936         int cpu, i;
1937         bool active = false;
1938 
1939         mutex_lock(&xps_map_mutex);
1940         dev_maps = xmap_dereference(dev->xps_maps);
1941 
1942         if (!dev_maps)
1943                 goto out_no_maps;
1944 
1945         for_each_possible_cpu(cpu) {
1946                 for (i = index; i < dev->num_tx_queues; i++) {
1947                         if (!remove_xps_queue(dev_maps, cpu, i))
1948                                 break;
1949                 }
1950                 if (i == dev->num_tx_queues)
1951                         active = true;
1952         }
1953 
1954         if (!active) {
1955                 RCU_INIT_POINTER(dev->xps_maps, NULL);
1956                 kfree_rcu(dev_maps, rcu);
1957         }
1958 
1959         for (i = index; i < dev->num_tx_queues; i++)
1960                 netdev_queue_numa_node_write(netdev_get_tx_queue(dev, i),
1961                                              NUMA_NO_NODE);
1962 
1963 out_no_maps:
1964         mutex_unlock(&xps_map_mutex);
1965 }
1966 
1967 static struct xps_map *expand_xps_map(struct xps_map *map,
1968                                       int cpu, u16 index)
1969 {
1970         struct xps_map *new_map;
1971         int alloc_len = XPS_MIN_MAP_ALLOC;
1972         int i, pos;
1973 
1974         for (pos = 0; map && pos < map->len; pos++) {
1975                 if (map->queues[pos] != index)
1976                         continue;
1977                 return map;
1978         }
1979 
1980         /* Need to add queue to this CPU's existing map */
1981         if (map) {
1982                 if (pos < map->alloc_len)
1983                         return map;
1984 
1985                 alloc_len = map->alloc_len * 2;
1986         }
1987 
1988         /* Need to allocate new map to store queue on this CPU's map */
1989         new_map = kzalloc_node(XPS_MAP_SIZE(alloc_len), GFP_KERNEL,
1990                                cpu_to_node(cpu));
1991         if (!new_map)
1992                 return NULL;
1993 
1994         for (i = 0; i < pos; i++)
1995                 new_map->queues[i] = map->queues[i];
1996         new_map->alloc_len = alloc_len;
1997         new_map->len = pos;
1998 
1999         return new_map;
2000 }
2001 
2002 int netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask,
2003                         u16 index)
2004 {
2005         struct xps_dev_maps *dev_maps, *new_dev_maps = NULL;
2006         struct xps_map *map, *new_map;
2007         int maps_sz = max_t(unsigned int, XPS_DEV_MAPS_SIZE, L1_CACHE_BYTES);
2008         int cpu, numa_node_id = -2;
2009         bool active = false;
2010 
2011         mutex_lock(&xps_map_mutex);
2012 
2013         dev_maps = xmap_dereference(dev->xps_maps);
2014 
2015         /* allocate memory for queue storage */
2016         for_each_online_cpu(cpu) {
2017                 if (!cpumask_test_cpu(cpu, mask))
2018                         continue;
2019 
2020                 if (!new_dev_maps)
2021                         new_dev_maps = kzalloc(maps_sz, GFP_KERNEL);
2022                 if (!new_dev_maps) {
2023                         mutex_unlock(&xps_map_mutex);
2024                         return -ENOMEM;
2025                 }
2026 
2027                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2028                                  NULL;
2029 
2030                 map = expand_xps_map(map, cpu, index);
2031                 if (!map)
2032                         goto error;
2033 
2034                 RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2035         }
2036 
2037         if (!new_dev_maps)
2038                 goto out_no_new_maps;
2039 
2040         for_each_possible_cpu(cpu) {
2041                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu)) {
2042                         /* add queue to CPU maps */
2043                         int pos = 0;
2044 
2045                         map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2046                         while ((pos < map->len) && (map->queues[pos] != index))
2047                                 pos++;
2048 
2049                         if (pos == map->len)
2050                                 map->queues[map->len++] = index;
2051 #ifdef CONFIG_NUMA
2052                         if (numa_node_id == -2)
2053                                 numa_node_id = cpu_to_node(cpu);
2054                         else if (numa_node_id != cpu_to_node(cpu))
2055                                 numa_node_id = -1;
2056 #endif
2057                 } else if (dev_maps) {
2058                         /* fill in the new device map from the old device map */
2059                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2060                         RCU_INIT_POINTER(new_dev_maps->cpu_map[cpu], map);
2061                 }
2062 
2063         }
2064 
2065         rcu_assign_pointer(dev->xps_maps, new_dev_maps);
2066 
2067         /* Cleanup old maps */
2068         if (dev_maps) {
2069                 for_each_possible_cpu(cpu) {
2070                         new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2071                         map = xmap_dereference(dev_maps->cpu_map[cpu]);
2072                         if (map && map != new_map)
2073                                 kfree_rcu(map, rcu);
2074                 }
2075 
2076                 kfree_rcu(dev_maps, rcu);
2077         }
2078 
2079         dev_maps = new_dev_maps;
2080         active = true;
2081 
2082 out_no_new_maps:
2083         /* update Tx queue numa node */
2084         netdev_queue_numa_node_write(netdev_get_tx_queue(dev, index),
2085                                      (numa_node_id >= 0) ? numa_node_id :
2086                                      NUMA_NO_NODE);
2087 
2088         if (!dev_maps)
2089                 goto out_no_maps;
2090 
2091         /* remove the queue from CPUs no longer in the mask */
2092         for_each_possible_cpu(cpu) {
2093                 if (cpumask_test_cpu(cpu, mask) && cpu_online(cpu))
2094                         continue;
2095 
2096                 if (remove_xps_queue(dev_maps, cpu, index))
2097                         active = true;
2098         }
2099 
2100         /* free map if not active */
2101         if (!active) {
2102                 RCU_INIT_POINTER(dev->xps_maps, NULL);
2103                 kfree_rcu(dev_maps, rcu);
2104         }
2105 
2106 out_no_maps:
2107         mutex_unlock(&xps_map_mutex);
2108 
2109         return 0;
2110 error:
2111         /* remove any maps that we added */
2112         for_each_possible_cpu(cpu) {
2113                 new_map = xmap_dereference(new_dev_maps->cpu_map[cpu]);
2114                 map = dev_maps ? xmap_dereference(dev_maps->cpu_map[cpu]) :
2115                                  NULL;
2116                 if (new_map && new_map != map)
2117                         kfree(new_map);
2118         }
2119 
2120         mutex_unlock(&xps_map_mutex);
2121 
2122         kfree(new_dev_maps);
2123         return -ENOMEM;
2124 }
2125 EXPORT_SYMBOL(netif_set_xps_queue);
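/*
 * Editorial example (not part of dev.c): a driver that wants transmit
 * queue 3 to be used by CPUs 0-3 could do the following under RTNL
 * (hypothetical values):
 *
 *	cpumask_var_t mask;
 *
 *	if (zalloc_cpumask_var(&mask, GFP_KERNEL)) {
 *		int cpu;
 *
 *		for (cpu = 0; cpu < 4; cpu++)
 *			cpumask_set_cpu(cpu, mask);
 *		netif_set_xps_queue(dev, mask, 3);
 *		free_cpumask_var(mask);
 *	}
 *
 * The same mapping can be set from user space via
 * /sys/class/net/<dev>/queues/tx-3/xps_cpus.
 */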
2126 
2127 #endif
2128 /*
2129  * Routine to help set real_num_tx_queues. To avoid skbs mapped to queues
2130  * greater than real_num_tx_queues, stale skbs on the qdisc must be flushed.
2131  */
2132 int netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq)
2133 {
2134         int rc;
2135 
2136         if (txq < 1 || txq > dev->num_tx_queues)
2137                 return -EINVAL;
2138 
2139         if (dev->reg_state == NETREG_REGISTERED ||
2140             dev->reg_state == NETREG_UNREGISTERING) {
2141                 ASSERT_RTNL();
2142 
2143                 rc = netdev_queue_update_kobjects(dev, dev->real_num_tx_queues,
2144                                                   txq);
2145                 if (rc)
2146                         return rc;
2147 
2148                 if (dev->num_tc)
2149                         netif_setup_tc(dev, txq);
2150 
2151                 if (txq < dev->real_num_tx_queues) {
2152                         qdisc_reset_all_tx_gt(dev, txq);
2153 #ifdef CONFIG_XPS
2154                         netif_reset_xps_queues_gt(dev, txq);
2155 #endif
2156                 }
2157         }
2158 
2159         dev->real_num_tx_queues = txq;
2160         return 0;
2161 }
2162 EXPORT_SYMBOL(netif_set_real_num_tx_queues);
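/*
 * Editorial example (not part of dev.c): a driver that allocates room for
 * many queues can later trim the number actually used, e.g. once it knows
 * how many interrupt vectors it obtained (hypothetical sketch):
 *
 *	dev = alloc_etherdev_mq(sizeof(struct example_priv), 16);
 *	// ... probe hardware, request nvecs MSI-X vectors ...
 *	rtnl_lock();
 *	err = netif_set_real_num_tx_queues(dev, nvecs);
 *	rtnl_unlock();
 *
 * num_tx_queues stays at 16; only real_num_tx_queues changes, and any
 * qdisc/XPS state referring to the removed queues is flushed above.
 */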
2163 
2164 #ifdef CONFIG_SYSFS
2165 /**
2166  *      netif_set_real_num_rx_queues - set actual number of RX queues used
2167  *      @dev: Network device
2168  *      @rxq: Actual number of RX queues
2169  *
2170  *      This must be called either with the rtnl_lock held or before
2171  *      registration of the net device.  Returns 0 on success, or a
2172  *      negative error code.  If called before registration, it always
2173  *      succeeds.
2174  */
2175 int netif_set_real_num_rx_queues(struct net_device *dev, unsigned int rxq)
2176 {
2177         int rc;
2178 
2179         if (rxq < 1 || rxq > dev->num_rx_queues)
2180                 return -EINVAL;
2181 
2182         if (dev->reg_state == NETREG_REGISTERED) {
2183                 ASSERT_RTNL();
2184 
2185                 rc = net_rx_queue_update_kobjects(dev, dev->real_num_rx_queues,
2186                                                   rxq);
2187                 if (rc)
2188                         return rc;
2189         }
2190 
2191         dev->real_num_rx_queues = rxq;
2192         return 0;
2193 }
2194 EXPORT_SYMBOL(netif_set_real_num_rx_queues);
2195 #endif
2196 
2197 /**
2198  * netif_get_num_default_rss_queues - default number of RSS queues
2199  *
2200  * This routine should set an upper limit on the number of RSS queues
2201  * used by default by multiqueue devices.
2202  */
2203 int netif_get_num_default_rss_queues(void)
2204 {
2205         return min_t(int, DEFAULT_MAX_NUM_RSS_QUEUES, num_online_cpus());
2206 }
2207 EXPORT_SYMBOL(netif_get_num_default_rss_queues);
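/*
 * Editorial example (not part of dev.c): drivers typically clamp their
 * default queue count with this helper rather than using num_online_cpus()
 * directly (hypothetical):
 *
 *	nqueues = min_t(unsigned int, hw_max_queues,
 *			netif_get_num_default_rss_queues());
 *
 * On a 32-core machine this yields DEFAULT_MAX_NUM_RSS_QUEUES (8) rather
 * than 32, avoiding oversized interrupt/queue setups by default.
 */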
2208 
2209 static inline void __netif_reschedule(struct Qdisc *q)
2210 {
2211         struct softnet_data *sd;
2212         unsigned long flags;
2213 
2214         local_irq_save(flags);
2215         sd = this_cpu_ptr(&softnet_data);
2216         q->next_sched = NULL;
2217         *sd->output_queue_tailp = q;
2218         sd->output_queue_tailp = &q->next_sched;
2219         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2220         local_irq_restore(flags);
2221 }
2222 
2223 void __netif_schedule(struct Qdisc *q)
2224 {
2225         if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
2226                 __netif_reschedule(q);
2227 }
2228 EXPORT_SYMBOL(__netif_schedule);
2229 
2230 struct dev_kfree_skb_cb {
2231         enum skb_free_reason reason;
2232 };
2233 
2234 static struct dev_kfree_skb_cb *get_kfree_skb_cb(const struct sk_buff *skb)
2235 {
2236         return (struct dev_kfree_skb_cb *)skb->cb;
2237 }
2238 
2239 void netif_schedule_queue(struct netdev_queue *txq)
2240 {
2241         rcu_read_lock();
2242         if (!(txq->state & QUEUE_STATE_ANY_XOFF)) {
2243                 struct Qdisc *q = rcu_dereference(txq->qdisc);
2244 
2245                 __netif_schedule(q);
2246         }
2247         rcu_read_unlock();
2248 }
2249 EXPORT_SYMBOL(netif_schedule_queue);
2250 
2251 /**
2252  *      netif_wake_subqueue - allow sending packets on subqueue
2253  *      @dev: network device
2254  *      @queue_index: sub queue index
2255  *
2256  * Resume individual transmit queue of a device with multiple transmit queues.
2257  */
2258 void netif_wake_subqueue(struct net_device *dev, u16 queue_index)
2259 {
2260         struct netdev_queue *txq = netdev_get_tx_queue(dev, queue_index);
2261 
2262         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &txq->state)) {
2263                 struct Qdisc *q;
2264 
2265                 rcu_read_lock();
2266                 q = rcu_dereference(txq->qdisc);
2267                 __netif_schedule(q);
2268                 rcu_read_unlock();
2269         }
2270 }
2271 EXPORT_SYMBOL(netif_wake_subqueue);
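/*
 * Editorial example (not part of dev.c): the usual pairing is
 * netif_stop_subqueue() when a per-queue TX ring fills up and
 * netif_wake_subqueue() from the TX completion handler once space is
 * available again.  example_ring_free() is a hypothetical helper.
 *
 *	// in ndo_start_xmit(), after posting the descriptor
 *	if (example_ring_free(ring) < MAX_SKB_FRAGS + 1)
 *		netif_stop_subqueue(dev, ring->index);
 *
 *	// in the TX completion path
 *	if (__netif_subqueue_stopped(dev, ring->index) &&
 *	    example_ring_free(ring) > MAX_SKB_FRAGS + 1)
 *		netif_wake_subqueue(dev, ring->index);
 */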
2272 
2273 void netif_tx_wake_queue(struct netdev_queue *dev_queue)
2274 {
2275         if (test_and_clear_bit(__QUEUE_STATE_DRV_XOFF, &dev_queue->state)) {
2276                 struct Qdisc *q;
2277 
2278                 rcu_read_lock();
2279                 q = rcu_dereference(dev_queue->qdisc);
2280                 __netif_schedule(q);
2281                 rcu_read_unlock();
2282         }
2283 }
2284 EXPORT_SYMBOL(netif_tx_wake_queue);
2285 
2286 void __dev_kfree_skb_irq(struct sk_buff *skb, enum skb_free_reason reason)
2287 {
2288         unsigned long flags;
2289 
2290         if (likely(atomic_read(&skb->users) == 1)) {
2291                 smp_rmb();
2292                 atomic_set(&skb->users, 0);
2293         } else if (likely(!atomic_dec_and_test(&skb->users))) {
2294                 return;
2295         }
2296         get_kfree_skb_cb(skb)->reason = reason;
2297         local_irq_save(flags);
2298         skb->next = __this_cpu_read(softnet_data.completion_queue);
2299         __this_cpu_write(softnet_data.completion_queue, skb);
2300         raise_softirq_irqoff(NET_TX_SOFTIRQ);
2301         local_irq_restore(flags);
2302 }
2303 EXPORT_SYMBOL(__dev_kfree_skb_irq);
2304 
2305 void __dev_kfree_skb_any(struct sk_buff *skb, enum skb_free_reason reason)
2306 {
2307         if (in_irq() || irqs_disabled())
2308                 __dev_kfree_skb_irq(skb, reason);
2309         else
2310                 dev_kfree_skb(skb);
2311 }
2312 EXPORT_SYMBOL(__dev_kfree_skb_any);
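/*
 * Editorial example (not part of dev.c): drivers whose cleanup paths may
 * run in either hardirq or process context use the _any variants so the
 * right freeing path is chosen automatically:
 *
 *	dev_kfree_skb_any(skb);		// error/drop path
 *	dev_consume_skb_any(skb);	// normal TX completion
 *
 * Both are thin wrappers around __dev_kfree_skb_any() with the
 * SKB_REASON_DROPPED / SKB_REASON_CONSUMED reason respectively, which in
 * turn defers the actual free to NET_TX_SOFTIRQ when IRQs are off.
 */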
2313 
2314 
2315 /**
2316  * netif_device_detach - mark device as removed
2317  * @dev: network device
2318  *
2319  * Mark device as removed from system and therefore no longer available.
2320  */
2321 void netif_device_detach(struct net_device *dev)
2322 {
2323         if (test_and_clear_bit(__LINK_STATE_PRESENT, &dev->state) &&
2324             netif_running(dev)) {
2325                 netif_tx_stop_all_queues(dev);
2326         }
2327 }
2328 EXPORT_SYMBOL(netif_device_detach);
2329 
2330 /**
2331  * netif_device_attach - mark device as attached
2332  * @dev: network device
2333  *
2334  * Mark device as attached from system and restart if needed.
2335  */
2336 void netif_device_attach(struct net_device *dev)
2337 {
2338         if (!test_and_set_bit(__LINK_STATE_PRESENT, &dev->state) &&
2339             netif_running(dev)) {
2340                 netif_tx_wake_all_queues(dev);
2341                 __netdev_watchdog_up(dev);
2342         }
2343 }
2344 EXPORT_SYMBOL(netif_device_attach);
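/*
 * Editorial example (not part of dev.c): the detach/attach pair is the
 * standard pattern in driver suspend/resume handlers, e.g. (hypothetical):
 *
 *	static int example_suspend(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		netif_device_detach(dev);
 *		// stop DMA, save state, power down ...
 *		return 0;
 *	}
 *
 *	static int example_resume(struct device *d)
 *	{
 *		struct net_device *dev = dev_get_drvdata(d);
 *
 *		// power up, restore state, restart DMA ...
 *		netif_device_attach(dev);
 *		return 0;
 *	}
 */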
2345 
2346 static void skb_warn_bad_offload(const struct sk_buff *skb)
2347 {
2348         static const netdev_features_t null_features = 0;
2349         struct net_device *dev = skb->dev;
2350         const char *driver = "";
2351 
2352         if (!net_ratelimit())
2353                 return;
2354 
2355         if (dev && dev->dev.parent)
2356                 driver = dev_driver_string(dev->dev.parent);
2357 
2358         WARN(1, "%s: caps=(%pNF, %pNF) len=%d data_len=%d gso_size=%d "
2359              "gso_type=%d ip_summed=%d\n",
2360              driver, dev ? &dev->features : &null_features,
2361              skb->sk ? &skb->sk->sk_route_caps : &null_features,
2362              skb->len, skb->data_len, skb_shinfo(skb)->gso_size,
2363              skb_shinfo(skb)->gso_type, skb->ip_summed);
2364 }
2365 
2366 /*
2367  * Invalidate hardware checksum when packet is to be mangled, and
2368  * complete checksum manually on outgoing path.
2369  */
2370 int skb_checksum_help(struct sk_buff *skb)
2371 {
2372         __wsum csum;
2373         int ret = 0, offset;
2374 
2375         if (skb->ip_summed == CHECKSUM_COMPLETE)
2376                 goto out_set_summed;
2377 
2378         if (unlikely(skb_shinfo(skb)->gso_size)) {
2379                 skb_warn_bad_offload(skb);
2380                 return -EINVAL;
2381         }
2382 
2383         /* Before computing a checksum, we should make sure no frag could
2384          * be modified by an external entity: the checksum could be wrong.
2385          */
2386         if (skb_has_shared_frag(skb)) {
2387                 ret = __skb_linearize(skb);
2388                 if (ret)
2389                         goto out;
2390         }
2391 
2392         offset = skb_checksum_start_offset(skb);
2393         BUG_ON(offset >= skb_headlen(skb));
2394         csum = skb_checksum(skb, offset, skb->len - offset, 0);
2395 
2396         offset += skb->csum_offset;
2397         BUG_ON(offset + sizeof(__sum16) > skb_headlen(skb));
2398 
2399         if (skb_cloned(skb) &&
2400             !skb_clone_writable(skb, offset + sizeof(__sum16))) {
2401                 ret = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
2402                 if (ret)
2403                         goto out;
2404         }
2405 
2406         *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2407 out_set_summed:
2408         skb->ip_summed = CHECKSUM_NONE;
2409 out:
2410         return ret;
2411 }
2412 EXPORT_SYMBOL(skb_checksum_help);
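/*
 * Editorial example (not part of dev.c): a driver whose hardware cannot
 * checksum a particular packet falls back to software before handing the
 * frame to the NIC.  example_hw_can_csum() is a hypothetical capability
 * check.
 *
 *	if (skb->ip_summed == CHECKSUM_PARTIAL &&
 *	    !example_hw_can_csum(skb) &&
 *	    skb_checksum_help(skb))
 *		goto drop;
 *
 * The core already does this in validate_xmit_skb() when the device lacks
 * the relevant NETIF_F_*_CSUM feature, so most drivers never need to call
 * it directly.
 */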
2413 
2414 __be16 skb_network_protocol(struct sk_buff *skb, int *depth)
2415 {
2416         __be16 type = skb->protocol;
2417 
2418         /* Tunnel gso handlers can set protocol to ethernet. */
2419         if (type == htons(ETH_P_TEB)) {
2420                 struct ethhdr *eth;
2421 
2422                 if (unlikely(!pskb_may_pull(skb, sizeof(struct ethhdr))))
2423                         return 0;
2424 
2425                 eth = (struct ethhdr *)skb_mac_header(skb);
2426                 type = eth->h_proto;
2427         }
2428 
2429         return __vlan_get_protocol(skb, type, depth);
2430 }
2431 
2432 /**
2433  *      skb_mac_gso_segment - mac layer segmentation handler.
2434  *      @skb: buffer to segment
2435  *      @features: features for the output path (see dev->features)
2436  */
2437 struct sk_buff *skb_mac_gso_segment(struct sk_buff *skb,
2438                                     netdev_features_t features)
2439 {
2440         struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
2441         struct packet_offload *ptype;
2442         int vlan_depth = skb->mac_len;
2443         __be16 type = skb_network_protocol(skb, &vlan_depth);
2444 
2445         if (unlikely(!type))
2446                 return ERR_PTR(-EINVAL);
2447 
2448         __skb_pull(skb, vlan_depth);
2449 
2450         rcu_read_lock();
2451         list_for_each_entry_rcu(ptype, &offload_base, list) {
2452                 if (ptype->type == type && ptype->callbacks.gso_segment) {
2453                         segs = ptype->callbacks.gso_segment(skb, features);
2454                         break;
2455                 }
2456         }
2457         rcu_read_unlock();
2458 
2459         __skb_push(skb, skb->data - skb_mac_header(skb));
2460 
2461         return segs;
2462 }
2463 EXPORT_SYMBOL(skb_mac_gso_segment);
2464 
2465 
2466 /* openvswitch calls this on rx path, so we need a different check.
2467  */
2468 static inline bool skb_needs_check(struct sk_buff *skb, bool tx_path)
2469 {
2470         if (tx_path)
2471                 return skb->ip_summed != CHECKSUM_PARTIAL;
2472         else
2473                 return skb->ip_summed == CHECKSUM_NONE;
2474 }
2475 
2476 /**
2477  *      __skb_gso_segment - Perform segmentation on skb.
2478  *      @skb: buffer to segment
2479  *      @features: features for the output path (see dev->features)
2480  *      @tx_path: whether it is called in TX path
2481  *
2482  *      This function segments the given skb and returns a list of segments.
2483  *
2484  *      It may return NULL if the skb requires no segmentation.  This is
2485  *      only possible when GSO is used for verifying header integrity.
2486  */
2487 struct sk_buff *__skb_gso_segment(struct sk_buff *skb,
2488                                   netdev_features_t features, bool tx_path)
2489 {
2490         if (unlikely(skb_needs_check(skb, tx_path))) {
2491                 int err;
2492 
2493                 skb_warn_bad_offload(skb);
2494 
2495                 err = skb_cow_head(skb, 0);
2496                 if (err < 0)
2497                         return ERR_PTR(err);
2498         }
2499 
2500         SKB_GSO_CB(skb)->mac_offset = skb_headroom(skb);
2501         SKB_GSO_CB(skb)->encap_level = 0;
2502 
2503         skb_reset_mac_header(skb);
2504         skb_reset_mac_len(skb);
2505 
2506         return skb_mac_gso_segment(skb, features);
2507 }
2508 EXPORT_SYMBOL(__skb_gso_segment);
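/*
 * Editorial example (not part of dev.c): callers segment an oversized skb
 * and then walk the resulting list; example_xmit_one() is a hypothetical
 * per-frame transmit helper.
 *
 *	struct sk_buff *segs;
 *
 *	segs = skb_gso_segment(skb, features);
 *	if (IS_ERR(segs))
 *		goto drop;
 *	if (segs) {
 *		consume_skb(skb);
 *		skb = segs;
 *	}
 *	while (skb) {
 *		struct sk_buff *next = skb->next;
 *
 *		skb->next = NULL;
 *		example_xmit_one(skb);
 *		skb = next;
 *	}
 *
 * This mirrors what validate_xmit_skb() and dev_hard_start_xmit() do a bit
 * further down in this file.
 */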
2509 
2510 /* Take action when hardware reception checksum errors are detected. */
2511 #ifdef CONFIG_BUG
2512 void netdev_rx_csum_fault(struct net_device *dev)
2513 {
2514         if (net_ratelimit()) {
2515                 pr_err("%s: hw csum failure\n", dev ? dev->name : "<unknown>");
2516                 dump_stack();
2517         }
2518 }
2519 EXPORT_SYMBOL(netdev_rx_csum_fault);
2520 #endif
2521 
2522 /* Actually, we should eliminate this check as soon as we know that:
2523  * 1. An IOMMU is present and allows mapping all the memory.
2524  * 2. No high memory really exists on this machine.
2525  */
2526 
2527 static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
2528 {
2529 #ifdef CONFIG_HIGHMEM
2530         int i;
2531         if (!(dev->features & NETIF_F_HIGHDMA)) {
2532                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2533                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2534                         if (PageHighMem(skb_frag_page(frag)))
2535                                 return 1;
2536                 }
2537         }
2538 
2539         if (PCI_DMA_BUS_IS_PHYS) {
2540                 struct device *pdev = dev->dev.parent;
2541 
2542                 if (!pdev)
2543                         return 0;
2544                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2545                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2546                         dma_addr_t addr = page_to_phys(skb_frag_page(frag));
2547                         if (!pdev->dma_mask || addr + PAGE_SIZE - 1 > *pdev->dma_mask)
2548                                 return 1;
2549                 }
2550         }
2551 #endif
2552         return 0;
2553 }
2554 
2555 /* For MPLS offload requests, verify we are testing hardware MPLS features
2556  * instead of the standard features for the netdev.
2557  */
2558 #if IS_ENABLED(CONFIG_NET_MPLS_GSO)
2559 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2560                                            netdev_features_t features,
2561                                            __be16 type)
2562 {
2563         if (eth_p_mpls(type))
2564                 features &= skb->dev->mpls_features;
2565 
2566         return features;
2567 }
2568 #else
2569 static netdev_features_t net_mpls_features(struct sk_buff *skb,
2570                                            netdev_features_t features,
2571                                            __be16 type)
2572 {
2573         return features;
2574 }
2575 #endif
2576 
2577 static netdev_features_t harmonize_features(struct sk_buff *skb,
2578         netdev_features_t features)
2579 {
2580         int tmp;
2581         __be16 type;
2582 
2583         type = skb_network_protocol(skb, &tmp);
2584         features = net_mpls_features(skb, features, type);
2585 
2586         if (skb->ip_summed != CHECKSUM_NONE &&
2587             !can_checksum_protocol(features, type)) {
2588                 features &= ~NETIF_F_ALL_CSUM;
2589         } else if (illegal_highdma(skb->dev, skb)) {
2590                 features &= ~NETIF_F_SG;
2591         }
2592 
2593         return features;
2594 }
2595 
2596 netdev_features_t passthru_features_check(struct sk_buff *skb,
2597                                           struct net_device *dev,
2598                                           netdev_features_t features)
2599 {
2600         return features;
2601 }
2602 EXPORT_SYMBOL(passthru_features_check);
2603 
2604 static netdev_features_t dflt_features_check(const struct sk_buff *skb,
2605                                              struct net_device *dev,
2606                                              netdev_features_t features)
2607 {
2608         return vlan_features_check(skb, features);
2609 }
2610 
2611 netdev_features_t netif_skb_features(struct sk_buff *skb)
2612 {
2613         struct net_device *dev = skb->dev;
2614         netdev_features_t features = dev->features;
2615         u16 gso_segs = skb_shinfo(skb)->gso_segs;
2616 
2617         if (gso_segs > dev->gso_max_segs || gso_segs < dev->gso_min_segs)
2618                 features &= ~NETIF_F_GSO_MASK;
2619 
2620         /* If encapsulation offload request, verify we are testing
2621          * hardware encapsulation features instead of standard
2622          * features for the netdev
2623          */
2624         if (skb->encapsulation)
2625                 features &= dev->hw_enc_features;
2626 
2627         if (skb_vlan_tagged(skb))
2628                 features = netdev_intersect_features(features,
2629                                                      dev->vlan_features |
2630                                                      NETIF_F_HW_VLAN_CTAG_TX |
2631                                                      NETIF_F_HW_VLAN_STAG_TX);
2632 
2633         if (dev->netdev_ops->ndo_features_check)
2634                 features &= dev->netdev_ops->ndo_features_check(skb, dev,
2635                                                                 features);
2636         else
2637                 features &= dflt_features_check(skb, dev, features);
2638 
2639         return harmonize_features(skb, features);
2640 }
2641 EXPORT_SYMBOL(netif_skb_features);
2642 
2643 static int xmit_one(struct sk_buff *skb, struct net_device *dev,
2644                     struct netdev_queue *txq, bool more)
2645 {
2646         unsigned int len;
2647         int rc;
2648 
2649         if (!list_empty(&ptype_all) || !list_empty(&dev->ptype_all))
2650                 dev_queue_xmit_nit(skb, dev);
2651 
2652         len = skb->len;
2653         trace_net_dev_start_xmit(skb, dev);
2654         rc = netdev_start_xmit(skb, dev, txq, more);
2655         trace_net_dev_xmit(skb, rc, dev, len);
2656 
2657         return rc;
2658 }
2659 
2660 struct sk_buff *dev_hard_start_xmit(struct sk_buff *first, struct net_device *dev,
2661                                     struct netdev_queue *txq, int *ret)
2662 {
2663         struct sk_buff *skb = first;
2664         int rc = NETDEV_TX_OK;
2665 
2666         while (skb) {
2667                 struct sk_buff *next = skb->next;
2668 
2669                 skb->next = NULL;
2670                 rc = xmit_one(skb, dev, txq, next != NULL);
2671                 if (unlikely(!dev_xmit_complete(rc))) {
2672                         skb->next = next;
2673                         goto out;
2674                 }
2675 
2676                 skb = next;
2677                 if (netif_xmit_stopped(txq) && skb) {
2678                         rc = NETDEV_TX_BUSY;
2679                         break;
2680                 }
2681         }
2682 
2683 out:
2684         *ret = rc;
2685         return skb;
2686 }
2687 
2688 static struct sk_buff *validate_xmit_vlan(struct sk_buff *skb,
2689                                           netdev_features_t features)
2690 {
2691         if (skb_vlan_tag_present(skb) &&
2692             !vlan_hw_offload_capable(features, skb->vlan_proto))
2693                 skb = __vlan_hwaccel_push_inside(skb);
2694         return skb;
2695 }
2696 
2697 static struct sk_buff *validate_xmit_skb(struct sk_buff *skb, struct net_device *dev)
2698 {
2699         netdev_features_t features;
2700 
2701         if (skb->next)
2702                 return skb;
2703 
2704         features = netif_skb_features(skb);
2705         skb = validate_xmit_vlan(skb, features);
2706         if (unlikely(!skb))
2707                 goto out_null;
2708 
2709         if (netif_needs_gso(skb, features)) {
2710                 struct sk_buff *segs;
2711 
2712                 segs = skb_gso_segment(skb, features);
2713                 if (IS_ERR(segs)) {
2714                         goto out_kfree_skb;
2715                 } else if (segs) {
2716                         consume_skb(skb);
2717                         skb = segs;
2718                 }
2719         } else {
2720                 if (skb_needs_linearize(skb, features) &&
2721                     __skb_linearize(skb))
2722                         goto out_kfree_skb;
2723 
2724                 /* If packet is not checksummed and device does not
2725                  * support checksumming for this protocol, complete
2726                  * checksumming here.
2727                  */
2728                 if (skb->ip_summed == CHECKSUM_PARTIAL) {
2729                         if (skb->encapsulation)
2730                                 skb_set_inner_transport_header(skb,
2731                                                                skb_checksum_start_offset(skb));
2732                         else
2733                                 skb_set_transport_header(skb,
2734                                                          skb_checksum_start_offset(skb));
2735                         if (!(features & NETIF_F_ALL_CSUM) &&
2736                             skb_checksum_help(skb))
2737                                 goto out_kfree_skb;
2738                 }
2739         }
2740 
2741         return skb;
2742 
2743 out_kfree_skb:
2744         kfree_skb(skb);
2745 out_null:
2746         return NULL;
2747 }
2748 
2749 struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev)
2750 {
2751         struct sk_buff *next, *head = NULL, *tail;
2752 
2753         for (; skb != NULL; skb = next) {
2754                 next = skb->next;
2755                 skb->next = NULL;
2756 
2757                 /* in case the skb won't be segmented, point it to itself */
2758                 skb->prev = skb;
2759 
2760                 skb = validate_xmit_skb(skb, dev);
2761                 if (!skb)
2762                         continue;
2763 
2764                 if (!head)
2765                         head = skb;
2766                 else
2767                         tail->next = skb;
2768                 /* If skb was segmented, skb->prev points to
2769                  * the last segment. If not, it still contains skb.
2770                  */
2771                 tail = skb->prev;
2772         }
2773         return head;
2774 }
2775 
2776 static void qdisc_pkt_len_init(struct sk_buff *skb)
2777 {
2778         const struct skb_shared_info *shinfo = skb_shinfo(skb);
2779 
2780         qdisc_skb_cb(skb)->pkt_len = skb->len;
2781 
2782         /* To get a more precise estimate of the bytes sent on the wire,
2783          * we add the header size of every segment to pkt_len.
2784          */
2785         if (shinfo->gso_size)  {
2786                 unsigned int hdr_len;
2787                 u16 gso_segs = shinfo->gso_segs;
2788 
2789                 /* mac layer + network layer */
2790                 hdr_len = skb_transport_header(skb) - skb_mac_header(skb);
2791 
2792                 /* + transport layer */
2793                 if (likely(shinfo->gso_type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6)))
2794                         hdr_len += tcp_hdrlen(skb);
2795                 else
2796                         hdr_len += sizeof(struct udphdr);
2797 
2798                 if (shinfo->gso_type & SKB_GSO_DODGY)
2799                         gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
2800                                                 shinfo->gso_size);
2801 
2802                 qdisc_skb_cb(skb)->pkt_len += (gso_segs - 1) * hdr_len;
2803         }
2804 }
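/*
 * Editorial note: a quick worked example of the estimate above (assumed,
 * illustrative numbers).  For a TSO skb of 10 segments with 14 bytes of
 * Ethernet header, 20 bytes of IPv4 header and a 32 byte TCP header
 * (timestamps enabled), hdr_len is 66; skb->len counts those headers only
 * once, so (10 - 1) * 66 = 594 bytes are added to pkt_len to approximate
 * what actually goes on the wire.
 */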
2805 
2806 static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
2807                                  struct net_device *dev,
2808                                  struct netdev_queue *txq)
2809 {
2810         spinlock_t *root_lock = qdisc_lock(q);
2811         bool contended;
2812         int rc;
2813 
2814         qdisc_pkt_len_init(skb);
2815         qdisc_calculate_pkt_len(skb, q);
2816         /*
2817          * Heuristic to force contended enqueues to serialize on a
2818          * separate lock before trying to get qdisc main lock.
2819          * This permits __QDISC___STATE_RUNNING owner to get the lock more
2820          * often and dequeue packets faster.
2821          */
2822         contended = qdisc_is_running(q);
2823         if (unlikely(contended))
2824                 spin_lock(&q->busylock);
2825 
2826         spin_lock(root_lock);
2827         if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
2828                 kfree_skb(skb);
2829                 rc = NET_XMIT_DROP;
2830         } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
2831                    qdisc_run_begin(q)) {
2832                 /*
2833                  * This is a work-conserving queue; there are no old skbs
2834                  * waiting to be sent out; and the qdisc is not running -
2835                  * xmit the skb directly.
2836                  */
2837 
2838                 qdisc_bstats_update(q, skb);
2839 
2840                 if (sch_direct_xmit(skb, q, dev, txq, root_lock, true)) {
2841                         if (unlikely(contended)) {
2842                                 spin_unlock(&q->busylock);
2843                                 contended = false;
2844                         }
2845                         __qdisc_run(q);
2846                 } else
2847                         qdisc_run_end(q);
2848 
2849                 rc = NET_XMIT_SUCCESS;
2850         } else {
2851                 rc = q->enqueue(skb, q) & NET_XMIT_MASK;
2852                 if (qdisc_run_begin(q)) {
2853                         if (unlikely(contended)) {
2854                                 spin_unlock(&q->busylock);
2855                                 contended = false;
2856                         }
2857                         __qdisc_run(q);
2858                 }
2859         }
2860         spin_unlock(root_lock);
2861         if (unlikely(contended))
2862                 spin_unlock(&q->busylock);
2863         return rc;
2864 }
2865 
2866 #if IS_ENABLED(CONFIG_CGROUP_NET_PRIO)
2867 static void skb_update_prio(struct sk_buff *skb)
2868 {
2869         struct netprio_map *map = rcu_dereference_bh(skb->dev->priomap);
2870 
2871         if (!skb->priority && skb->sk && map) {
2872                 unsigned int prioidx = skb->sk->sk_cgrp_prioidx;
2873 
2874                 if (prioidx < map->priomap_len)
2875                         skb->priority = map->priomap[prioidx];
2876         }
2877 }
2878 #else
2879 #define skb_update_prio(skb)
2880 #endif
2881 
2882 DEFINE_PER_CPU(int, xmit_recursion);
2883 EXPORT_SYMBOL(xmit_recursion);
2884 
2885 #define RECURSION_LIMIT 10
2886 
2887 /**
2888  *      dev_loopback_xmit - loop back @skb
2889  *      @skb: buffer to transmit
2890  */
2891 int dev_loopback_xmit(struct sock *sk, struct sk_buff *skb)
2892 {
2893         skb_reset_mac_header(skb);
2894         __skb_pull(skb, skb_network_offset(skb));
2895         skb->pkt_type = PACKET_LOOPBACK;
2896         skb->ip_summed = CHECKSUM_UNNECESSARY;
2897         WARN_ON(!skb_dst(skb));
2898         skb_dst_force(skb);
2899         netif_rx_ni(skb);
2900         return 0;
2901 }
2902 EXPORT_SYMBOL(dev_loopback_xmit);
2903 
2904 /**
2905  *      __dev_queue_xmit - transmit a buffer
2906  *      @skb: buffer to transmit
2907  *      @accel_priv: private data used for L2 forwarding offload
2908  *
2909  *      Queue a buffer for transmission to a network device. The caller must
2910  *      have set the device and priority and built the buffer before calling
2911  *      this function. The function can be called from an interrupt.
2912  *
2913  *      A negative errno code is returned on a failure. A success does not
2914  *      guarantee the frame will be transmitted as it may be dropped due
2915  *      to congestion or traffic shaping.
2916  *
2917  * -----------------------------------------------------------------------------------
2918  *      I notice this method can also return errors from the queue disciplines,
2919  *      including NET_XMIT_DROP, which is a positive value.  So, errors can also
2920  *      be positive.
2921  *
2922  *      Regardless of the return value, the skb is consumed, so it is currently
2923  *      difficult to retry a send to this method.  (You can bump the ref count
2924  *      before sending to hold a reference for retry if you are careful.)
2925  *
2926  *      When calling this method, interrupts MUST be enabled.  This is because
2927  *      the BH enable code must have IRQs enabled so that it will not deadlock.
2928  *          --BLG
2929  */
2930 static int __dev_queue_xmit(struct sk_buff *skb, void *accel_priv)
2931 {
2932         struct net_device *dev = skb->dev;
2933         struct netdev_queue *txq;
2934         struct Qdisc *q;
2935         int rc = -ENOMEM;
2936 
2937         skb_reset_mac_header(skb);
2938 
2939         if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_SCHED_TSTAMP))
2940                 __skb_tstamp_tx(skb, NULL, skb->sk, SCM_TSTAMP_SCHED);
2941 
2942         /* Disable soft irqs for various locks below. Also
2943          * stops preemption for RCU.
2944          */
2945         rcu_read_lock_bh();
2946 
2947         skb_update_prio(skb);
2948 
2949         /* If device/qdisc don't need skb->dst, release it right now while
2950          * it's hot in this CPU's cache.
2951          */
2952         if (dev->priv_flags & IFF_XMIT_DST_RELEASE)
2953                 skb_dst_drop(skb);
2954         else
2955                 skb_dst_force(skb);
2956 
2957         txq = netdev_pick_tx(dev, skb, accel_priv);
2958         q = rcu_dereference_bh(txq->qdisc);
2959 
2960 #ifdef CONFIG_NET_CLS_ACT
2961         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_EGRESS);
2962 #endif
2963         trace_net_dev_queue(skb);
2964         if (q->enqueue) {
2965                 rc = __dev_xmit_skb(skb, q, dev, txq);
2966                 goto out;
2967         }
2968 
2969         /* The device has no queue. Common case for software devices:
2970            loopback, all sorts of tunnels...
2971 
2972            Really, it is unlikely that netif_tx_lock protection is necessary
2973            here.  (e.g. loopback and IP tunnels are clean, ignoring statistics
2974            counters.)
2975            However, it is possible that they rely on the protection
2976            provided by us here.
2977 
2978            Check this and take the lock; it is not prone to deadlocks.
2979            Either that, or shoot the noqueue qdisc - that is even simpler 8)
2980          */
2981         if (dev->flags & IFF_UP) {
2982                 int cpu = smp_processor_id(); /* ok because BHs are off */
2983 
2984                 if (txq->xmit_lock_owner != cpu) {
2985 
2986                         if (__this_cpu_read(xmit_recursion) > RECURSION_LIMIT)
2987                                 goto recursion_alert;
2988 
2989                         skb = validate_xmit_skb(skb, dev);
2990                         if (!skb)
2991                                 goto drop;
2992 
2993                         HARD_TX_LOCK(dev, txq, cpu);
2994 
2995                         if (!netif_xmit_stopped(txq)) {
2996                                 __this_cpu_inc(xmit_recursion);
2997                                 skb = dev_hard_start_xmit(skb, dev, txq, &rc);
2998                                 __this_cpu_dec(xmit_recursion);
2999                                 if (dev_xmit_complete(rc)) {
3000                                         HARD_TX_UNLOCK(dev, txq);
3001                                         goto out;
3002                                 }
3003                         }
3004                         HARD_TX_UNLOCK(dev, txq);
3005                         net_crit_ratelimited("Virtual device %s asks to queue packet!\n",
3006                                              dev->name);
3007                 } else {
3008                         /* Recursion is detected! It is possible,
3009                          * unfortunately
3010                          */
3011 recursion_alert:
3012                         net_crit_ratelimited("Dead loop on virtual device %s, fix it urgently!\n",
3013                                              dev->name);
3014                 }
3015         }
3016 
3017         rc = -ENETDOWN;
3018 drop:
3019         rcu_read_unlock_bh();
3020 
3021         atomic_long_inc(&dev->tx_dropped);
3022         kfree_skb_list(skb);
3023         return rc;
3024 out:
3025         rcu_read_unlock_bh();
3026         return rc;
3027 }
3028 
3029 int dev_queue_xmit_sk(struct sock *sk, struct sk_buff *skb)
3030 {
3031         return __dev_queue_xmit(skb, NULL);
3032 }
3033 EXPORT_SYMBOL(dev_queue_xmit_sk);
3034 
3035 int dev_queue_xmit_accel(struct sk_buff *skb, void *accel_priv)
3036 {
3037         return __dev_queue_xmit(skb, accel_priv);
3038 }
3039 EXPORT_SYMBOL(dev_queue_xmit_accel);
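/*
 * Editorial example (not part of dev.c): kernel code that builds its own
 * frame transmits it through this path via dev_queue_xmit(), a thin
 * wrapper around __dev_queue_xmit().  Hypothetical sketch using the local
 * experimental ethertype:
 *
 *	skb->dev = dev;
 *	skb->protocol = htons(ETH_P_802_EX1);
 *	if (dev_hard_header(skb, dev, ETH_P_802_EX1, dest, NULL,
 *			    skb->len) < 0) {
 *		kfree_skb(skb);
 *		return;
 *	}
 *	dev_queue_xmit(skb);
 *
 * The return value only reports queueing status; as the comment above
 * notes, the skb is consumed either way.
 */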
3040 
3041 
3042 /*=======================================================================
3043                         Receiver routines
3044   =======================================================================*/
3045 
3046 int netdev_max_backlog __read_mostly = 1000;
3047 EXPORT_SYMBOL(netdev_max_backlog);
3048 
3049 int netdev_tstamp_prequeue __read_mostly = 1;
3050 int netdev_budget __read_mostly = 300;
3051 int weight_p __read_mostly = 64;            /* old backlog weight */
3052 
3053 /* Called with irq disabled */
3054 static inline void ____napi_schedule(struct softnet_data *sd,
3055                                      struct napi_struct *napi)
3056 {
3057         list_add_tail(&napi->poll_list, &sd->poll_list);
3058         __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3059 }
3060 
3061 #ifdef CONFIG_RPS
3062 
3063 /* One global table that all flow-based protocols share. */
3064 struct rps_sock_flow_table __rcu *rps_sock_flow_table __read_mostly;
3065 EXPORT_SYMBOL(rps_sock_flow_table);
3066 u32 rps_cpu_mask __read_mostly;
3067 EXPORT_SYMBOL(rps_cpu_mask);
3068 
3069 struct static_key rps_needed __read_mostly;
3070 
3071 static struct rps_dev_flow *
3072 set_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3073             struct rps_dev_flow *rflow, u16 next_cpu)
3074 {
3075         if (next_cpu < nr_cpu_ids) {
3076 #ifdef CONFIG_RFS_ACCEL
3077                 struct netdev_rx_queue *rxqueue;
3078                 struct rps_dev_flow_table *flow_table;
3079                 struct rps_dev_flow *old_rflow;
3080                 u32 flow_id;
3081                 u16 rxq_index;
3082                 int rc;
3083 
3084                 /* Should we steer this flow to a different hardware queue? */
3085                 if (!skb_rx_queue_recorded(skb) || !dev->rx_cpu_rmap ||
3086                     !(dev->features & NETIF_F_NTUPLE))
3087                         goto out;
3088                 rxq_index = cpu_rmap_lookup_index(dev->rx_cpu_rmap, next_cpu);
3089                 if (rxq_index == skb_get_rx_queue(skb))
3090                         goto out;
3091 
3092                 rxqueue = dev->_rx + rxq_index;
3093                 flow_table = rcu_dereference(rxqueue->rps_flow_table);
3094                 if (!flow_table)
3095                         goto out;
3096                 flow_id = skb_get_hash(skb) & flow_table->mask;
3097                 rc = dev->netdev_ops->ndo_rx_flow_steer(dev, skb,
3098                                                         rxq_index, flow_id);
3099                 if (rc < 0)
3100                         goto out;
3101                 old_rflow = rflow;
3102                 rflow = &flow_table->flows[flow_id];
3103                 rflow->filter = rc;
3104                 if (old_rflow->filter == rflow->filter)
3105                         old_rflow->filter = RPS_NO_FILTER;
3106         out:
3107 #endif
3108                 rflow->last_qtail =
3109                         per_cpu(softnet_data, next_cpu).input_queue_head;
3110         }
3111 
3112         rflow->cpu = next_cpu;
3113         return rflow;
3114 }
3115 
3116 /*
3117  * get_rps_cpu is called from netif_receive_skb and returns the target
3118  * CPU from the RPS map of the receiving queue for a given skb.
3119  * rcu_read_lock must be held on entry.
3120  */
3121 static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
3122                        struct rps_dev_flow **rflowp)
3123 {
3124         const struct rps_sock_flow_table *sock_flow_table;
3125         struct netdev_rx_queue *rxqueue = dev->_rx;
3126         struct rps_dev_flow_table *flow_table;
3127         struct rps_map *map;
3128         int cpu = -1;
3129         u32 tcpu;
3130         u32 hash;
3131 
3132         if (skb_rx_queue_recorded(skb)) {
3133                 u16 index = skb_get_rx_queue(skb);
3134 
3135                 if (unlikely(index >= dev->real_num_rx_queues)) {
3136                         WARN_ONCE(dev->real_num_rx_queues > 1,
3137                                   "%s received packet on queue %u, but number "
3138                                   "of RX queues is %u\n",
3139                                   dev->name, index, dev->real_num_rx_queues);
3140                         goto done;
3141                 }
3142                 rxqueue += index;
3143         }
3144 
3145         /* Avoid computing hash if RFS/RPS is not active for this rxqueue */
3146 
3147         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3148         map = rcu_dereference(rxqueue->rps_map);
3149         if (!flow_table && !map)
3150                 goto done;
3151 
3152         skb_reset_network_header(skb);
3153         hash = skb_get_hash(skb);
3154         if (!hash)
3155                 goto done;
3156 
3157         sock_flow_table = rcu_dereference(rps_sock_flow_table);
3158         if (flow_table && sock_flow_table) {
3159                 struct rps_dev_flow *rflow;
3160                 u32 next_cpu;
3161                 u32 ident;
3162 
3163                 /* First check the global flow table for a match. */
3164                 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
3165                 if ((ident ^ hash) & ~rps_cpu_mask)
3166                         goto try_rps;
3167 
3168                 next_cpu = ident & rps_cpu_mask;
3169 
3170                 /* OK, now we know there is a match,
3171                  * we can look at the local (per receive queue) flow table
3172                  */
3173                 rflow = &flow_table->flows[hash & flow_table->mask];
3174                 tcpu = rflow->cpu;
3175 
3176                 /*
3177                  * If the desired CPU (where last recvmsg was done) is
3178                  * different from current CPU (one in the rx-queue flow
3179                  * table entry), switch if one of the following holds:
3180                  *   - Current CPU is unset (>= nr_cpu_ids).
3181                  *   - Current CPU is offline.
3182                  *   - The current CPU's queue tail has advanced beyond the
3183                  *     last packet that was enqueued using this table entry.
3184                  *     This guarantees that all previous packets for the flow
3185                  *     have been dequeued, thus preserving in order delivery.
3186                  */
3187                 if (unlikely(tcpu != next_cpu) &&
3188                     (tcpu >= nr_cpu_ids || !cpu_online(tcpu) ||
3189                      ((int)(per_cpu(softnet_data, tcpu).input_queue_head -
3190                       rflow->last_qtail)) >= 0)) {
3191                         tcpu = next_cpu;
3192                         rflow = set_rps_cpu(dev, skb, rflow, next_cpu);
3193                 }
3194 
3195                 if (tcpu < nr_cpu_ids && cpu_online(tcpu)) {
3196                         *rflowp = rflow;
3197                         cpu = tcpu;
3198                         goto done;
3199                 }
3200         }
3201 
3202 try_rps:
3203 
3204         if (map) {
3205                 tcpu = map->cpus[reciprocal_scale(hash, map->len)];
3206                 if (cpu_online(tcpu)) {
3207                         cpu = tcpu;
3208                         goto done;
3209                 }
3210         }
3211 
3212 done:
3213         return cpu;
3214 }
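/* Illustrative sketch (not part of the original file): each
 * rps_sock_flow_table entry packs the flow hash in its upper bits and
 * the desired CPU in the bits covered by rps_cpu_mask, so the lookup
 * above roughly amounts to:
 *
 *      u32 ident = sock_flow_table->ents[hash & sock_flow_table->mask];
 *
 *      if (((ident ^ hash) & ~rps_cpu_mask) == 0)      // upper bits match
 *              next_cpu = ident & rps_cpu_mask;        // low bits = CPU
 *
 * The comparison fails when the slot was last written for a different
 * flow, in which case we fall back to plain RPS.
 */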
3215 
3216 #ifdef CONFIG_RFS_ACCEL
3217 
3218 /**
3219  * rps_may_expire_flow - check whether an RFS hardware filter may be removed
3220  * @dev: Device on which the filter was set
3221  * @rxq_index: RX queue index
3222  * @flow_id: Flow ID passed to ndo_rx_flow_steer()
3223  * @filter_id: Filter ID returned by ndo_rx_flow_steer()
3224  *
3225  * Drivers that implement ndo_rx_flow_steer() should periodically call
3226  * this function for each installed filter and remove the filters for
3227  * which it returns %true.
3228  */
3229 bool rps_may_expire_flow(struct net_device *dev, u16 rxq_index,
3230                          u32 flow_id, u16 filter_id)
3231 {
3232         struct netdev_rx_queue *rxqueue = dev->_rx + rxq_index;
3233         struct rps_dev_flow_table *flow_table;
3234         struct rps_dev_flow *rflow;
3235         bool expire = true;
3236         unsigned int cpu;
3237 
3238         rcu_read_lock();
3239         flow_table = rcu_dereference(rxqueue->rps_flow_table);
3240         if (flow_table && flow_id <= flow_table->mask) {
3241                 rflow = &flow_table->flows[flow_id];
3242                 cpu = ACCESS_ONCE(rflow->cpu);
3243                 if (rflow->filter == filter_id && cpu < nr_cpu_ids &&
3244                     ((int)(per_cpu(softnet_data, cpu).input_queue_head -
3245                            rflow->last_qtail) <
3246                      (int)(10 * flow_table->mask)))
3247                         expire = false;
3248         }
3249         rcu_read_unlock();
3250         return expire;
3251 }
3252 EXPORT_SYMBOL(rps_may_expire_flow);
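/* Hedged usage sketch (illustrative only, not from the original file):
 * a driver implementing ndo_rx_flow_steer() might expire stale hardware
 * filters from a periodic worker roughly like this, where foo_priv,
 * foo_remove_filter() and the filter bookkeeping are hypothetical:
 *
 *      for (id = 0; id < priv->n_filters; id++) {
 *              struct foo_filter *f = &priv->filters[id];
 *
 *              if (!f->in_use)
 *                      continue;
 *              if (rps_may_expire_flow(priv->netdev, f->rxq_index,
 *                                      f->flow_id, id))
 *                      foo_remove_filter(priv, f);
 *      }
 */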
3253 
3254 #endif /* CONFIG_RFS_ACCEL */
3255 
3256 /* Called from hardirq (IPI) context */
3257 static void rps_trigger_softirq(void *data)
3258 {
3259         struct softnet_data *sd = data;
3260 
3261         ____napi_schedule(sd, &sd->backlog);
3262         sd->received_rps++;
3263 }
3264 
3265 #endif /* CONFIG_RPS */
3266 
3267 /*
3268  * Check if this softnet_data structure belongs to another CPU.
3269  * If yes, queue it on our IPI list and return 1.
3270  * If no, return 0.
3271  */
3272 static int rps_ipi_queued(struct softnet_data *sd)
3273 {
3274 #ifdef CONFIG_RPS
3275         struct softnet_data *mysd = this_cpu_ptr(&softnet_data);
3276 
3277         if (sd != mysd) {
3278                 sd->rps_ipi_next = mysd->rps_ipi_list;
3279                 mysd->rps_ipi_list = sd;
3280 
3281                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
3282                 return 1;
3283         }
3284 #endif /* CONFIG_RPS */
3285         return 0;
3286 }
3287 
3288 #ifdef CONFIG_NET_FLOW_LIMIT
3289 int netdev_flow_limit_table_len __read_mostly = (1 << 12);
3290 #endif
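/* Illustrative note (not part of the original file): the flow limit
 * mechanism is enabled per CPU and sized from userspace; assuming the
 * sysctl names documented in Documentation/networking/scaling.txt:
 *
 *      echo f > /proc/sys/net/core/flow_limit_cpu_bitmap
 *      sysctl -w net.core.flow_limit_table_len=8192
 */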
3291 
3292 static bool skb_flow_limit(struct sk_buff *skb, unsigned int qlen)
3293 {
3294 #ifdef CONFIG_NET_FLOW_LIMIT
3295         struct sd_flow_limit *fl;
3296         struct softnet_data *sd;
3297         unsigned int old_flow, new_flow;
3298 
3299         if (qlen < (netdev_max_backlog >> 1))
3300                 return false;
3301 
3302         sd = this_cpu_ptr(&softnet_data);
3303 
3304         rcu_read_lock();
3305         fl = rcu_dereference(sd->flow_limit);
3306         if (fl) {
3307                 new_flow = skb_get_hash(skb) & (fl->num_buckets - 1);
3308                 old_flow = fl->history[fl->history_head];
3309                 fl->history[fl->history_head] = new_flow;
3310 
3311                 fl->history_head++;
3312                 fl->history_head &= FLOW_LIMIT_HISTORY - 1;
3313 
3314                 if (likely(fl->buckets[old_flow]))
3315                         fl->buckets[old_flow]--;
3316 
3317                 if (++fl->buckets[new_flow] > (FLOW_LIMIT_HISTORY >> 1)) {
3318                         fl->count++;
3319                         rcu_read_unlock();
3320                         return true;
3321                 }
3322         }
3323         rcu_read_unlock();
3324 #endif
3325         return false;
3326 }
3327 
3328 /*
3329  * enqueue_to_backlog is called to queue an skb to a per CPU backlog
3330  * queue (may be a remote CPU queue).
3331  */
3332 static int enqueue_to_backlog(struct sk_buff *skb, int cpu,
3333                               unsigned int *qtail)
3334 {
3335         struct softnet_data *sd;
3336         unsigned long flags;
3337         unsigned int qlen;
3338 
3339         sd = &per_cpu(softnet_data, cpu);
3340 
3341         local_irq_save(flags);
3342 
3343         rps_lock(sd);
3344         qlen = skb_queue_len(&sd->input_pkt_queue);
3345         if (qlen <= netdev_max_backlog && !skb_flow_limit(skb, qlen)) {
3346                 if (qlen) {
3347 enqueue:
3348                         __skb_queue_tail(&sd->input_pkt_queue, skb);
3349                         input_queue_tail_incr_save(sd, qtail);
3350                         rps_unlock(sd);
3351                         local_irq_restore(flags);
3352                         return NET_RX_SUCCESS;
3353                 }
3354 
3355                 /* Schedule NAPI for the backlog device.
3356                  * We can use a non-atomic operation since we own the queue lock.
3357                  */
3358                 if (!__test_and_set_bit(NAPI_STATE_SCHED, &sd->backlog.state)) {
3359                         if (!rps_ipi_queued(sd))
3360                                 ____napi_schedule(sd, &sd->backlog);
3361                 }
3362                 goto enqueue;
3363         }
3364 
3365         sd->dropped++;
3366         rps_unlock(sd);
3367 
3368         local_irq_restore(flags);
3369 
3370         atomic_long_inc(&skb->dev->rx_dropped);
3371         kfree_skb(skb);
3372         return NET_RX_DROP;
3373 }
3374 
3375 static int netif_rx_internal(struct sk_buff *skb)
3376 {
3377         int ret;
3378 
3379         net_timestamp_check(netdev_tstamp_prequeue, skb);
3380 
3381         trace_netif_rx(skb);
3382 #ifdef CONFIG_RPS
3383         if (static_key_false(&rps_needed)) {
3384                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3385                 int cpu;
3386 
3387                 preempt_disable();
3388                 rcu_read_lock();
3389 
3390                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3391                 if (cpu < 0)
3392                         cpu = smp_processor_id();
3393 
3394                 ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3395 
3396                 rcu_read_unlock();
3397                 preempt_enable();
3398         } else
3399 #endif
3400         {
3401                 unsigned int qtail;
3402                 ret = enqueue_to_backlog(skb, get_cpu(), &qtail);
3403                 put_cpu();
3404         }
3405         return ret;
3406 }
3407 
3408 /**
3409  *      netif_rx        -       post buffer to the network code
3410  *      @skb: buffer to post
3411  *
3412  *      This function receives a packet from a device driver and queues it for
3413  *      the upper (protocol) levels to process.  It always succeeds. The buffer
3414  *      may be dropped during processing for congestion control or by the
3415  *      protocol layers.
3416  *
3417  *      return values:
3418  *      NET_RX_SUCCESS  (no congestion)
3419  *      NET_RX_DROP     (packet was dropped)
3420  *
3421  */
3422 
3423 int netif_rx(struct sk_buff *skb)
3424 {
3425         trace_netif_rx_entry(skb);
3426 
3427         return netif_rx_internal(skb);
3428 }
3429 EXPORT_SYMBOL(netif_rx);
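/* Hedged usage sketch (illustrative only, not from the original file):
 * a non-NAPI driver typically hands a received frame to the stack from
 * its interrupt handler along these lines; foo_alloc_rx_skb() and the
 * private device structure are hypothetical:
 *
 *      skb = foo_alloc_rx_skb(priv, len);          // copy DMA data in
 *      skb->protocol = eth_type_trans(skb, priv->netdev);
 *      netif_rx(skb);                              // queue to backlog
 *
 * netif_rx_ni() below is the variant to use from process context, where
 * pending softirqs may need to be run by the caller.
 */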
3430 
3431 int netif_rx_ni(struct sk_buff *skb)
3432 {
3433         int err;
3434 
3435         trace_netif_rx_ni_entry(skb);
3436 
3437         preempt_disable();
3438         err = netif_rx_internal(skb);
3439         if (local_softirq_pending())
3440                 do_softirq();
3441         preempt_enable();
3442 
3443         return err;
3444 }
3445 EXPORT_SYMBOL(netif_rx_ni);
3446 
3447 static void net_tx_action(struct softirq_action *h)
3448 {
3449         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3450 
3451         if (sd->completion_queue) {
3452                 struct sk_buff *clist;
3453 
3454                 local_irq_disable();
3455                 clist = sd->completion_queue;
3456                 sd->completion_queue = NULL;
3457                 local_irq_enable();
3458 
3459                 while (clist) {
3460                         struct sk_buff *skb = clist;
3461                         clist = clist->next;
3462 
3463                         WARN_ON(atomic_read(&skb->users));
3464                         if (likely(get_kfree_skb_cb(skb)->reason == SKB_REASON_CONSUMED))
3465                                 trace_consume_skb(skb);
3466                         else
3467                                 trace_kfree_skb(skb, net_tx_action);
3468                         __kfree_skb(skb);
3469                 }
3470         }
3471 
3472         if (sd->output_queue) {
3473                 struct Qdisc *head;
3474 
3475                 local_irq_disable();
3476                 head = sd->output_queue;
3477                 sd->output_queue = NULL;
3478                 sd->output_queue_tailp = &sd->output_queue;
3479                 local_irq_enable();
3480 
3481                 while (head) {
3482                         struct Qdisc *q = head;
3483                         spinlock_t *root_lock;
3484 
3485                         head = head->next_sched;
3486 
3487                         root_lock = qdisc_lock(q);
3488                         if (spin_trylock(root_lock)) {
3489                                 smp_mb__before_atomic();
3490                                 clear_bit(__QDISC_STATE_SCHED,
3491                                           &q->state);
3492                                 qdisc_run(q);
3493                                 spin_unlock(root_lock);
3494                         } else {
3495                                 if (!test_bit(__QDISC_STATE_DEACTIVATED,
3496                                               &q->state)) {
3497                                         __netif_reschedule(q);
3498                                 } else {
3499                                         smp_mb__before_atomic();
3500                                         clear_bit(__QDISC_STATE_SCHED,
3501                                                   &q->state);
3502                                 }
3503                         }
3504                 }
3505         }
3506 }
3507 
3508 #if (defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)) && \
3509     (defined(CONFIG_ATM_LANE) || defined(CONFIG_ATM_LANE_MODULE))
3510 /* This hook is defined here for ATM LANE */
3511 int (*br_fdb_test_addr_hook)(struct net_device *dev,
3512                              unsigned char *addr) __read_mostly;
3513 EXPORT_SYMBOL_GPL(br_fdb_test_addr_hook);
3514 #endif
3515 
3516 #ifdef CONFIG_NET_CLS_ACT
3517 /* TODO: Maybe we should just force sch_ingress to be compiled in
3518  * whenever CONFIG_NET_CLS_ACT is?  Otherwise we currently pay for a few
3519  * useless instructions (a compare and two extra stores) when the ingress
3520  * scheduler is not built but CONFIG_NET_CLS_ACT is.
3521  * NOTE: This doesn't remove any functionality; if you don't have
3522  * the ingress scheduler, you just can't add policies on ingress.
3523  *
3524  */
3525 static int ing_filter(struct sk_buff *skb, struct netdev_queue *rxq)
3526 {
3527         struct net_device *dev = skb->dev;
3528         u32 ttl = G_TC_RTTL(skb->tc_verd);
3529         int result = TC_ACT_OK;
3530         struct Qdisc *q;
3531 
3532         if (unlikely(MAX_RED_LOOP < ttl++)) {
3533                 net_warn_ratelimited("Redir loop detected, dropping packet (%d->%d)\n",
3534                                      skb->skb_iif, dev->ifindex);
3535                 return TC_ACT_SHOT;
3536         }
3537 
3538         skb->tc_verd = SET_TC_RTTL(skb->tc_verd, ttl);
3539         skb->tc_verd = SET_TC_AT(skb->tc_verd, AT_INGRESS);
3540 
3541         q = rcu_dereference(rxq->qdisc);
3542         if (q != &noop_qdisc) {
3543                 spin_lock(qdisc_lock(q));
3544                 if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
3545                         result = qdisc_enqueue_root(skb, q);
3546                 spin_unlock(qdisc_lock(q));
3547         }
3548 
3549         return result;
3550 }
3551 
3552 static inline struct sk_buff *handle_ing(struct sk_buff *skb,
3553                                          struct packet_type **pt_prev,
3554                                          int *ret, struct net_device *orig_dev)
3555 {
3556         struct netdev_queue *rxq = rcu_dereference(skb->dev->ingress_queue);
3557 
3558         if (!rxq || rcu_access_pointer(rxq->qdisc) == &noop_qdisc)
3559                 return skb;
3560 
3561         if (*pt_prev) {
3562                 *ret = deliver_skb(skb, *pt_prev, orig_dev);
3563                 *pt_prev = NULL;
3564         }
3565 
3566         switch (ing_filter(skb, rxq)) {
3567         case TC_ACT_SHOT:
3568         case TC_ACT_STOLEN:
3569                 kfree_skb(skb);
3570                 return NULL;
3571         }
3572 
3573         return skb;
3574 }
3575 #endif
3576 
3577 /**
3578  *      netdev_rx_handler_register - register receive handler
3579  *      @dev: device to register a handler for
3580  *      @rx_handler: receive handler to register
3581  *      @rx_handler_data: data pointer that is used by rx handler
3582  *
3583  *      Register a receive handler for a device. This handler will then be
3584  *      called from __netif_receive_skb. A negative errno code is returned
3585  *      on a failure.
3586  *
3587  *      The caller must hold the rtnl_mutex.
3588  *
3589  *      For a general description of rx_handler, see enum rx_handler_result.
3590  */
3591 int netdev_rx_handler_register(struct net_device *dev,
3592                                rx_handler_func_t *rx_handler,
3593                                void *rx_handler_data)
3594 {
3595         ASSERT_RTNL();
3596 
3597         if (dev->rx_handler)
3598                 return -EBUSY;
3599 
3600         /* Note: rx_handler_data must be set before rx_handler */
3601         rcu_assign_pointer(dev->rx_handler_data, rx_handler_data);
3602         rcu_assign_pointer(dev->rx_handler, rx_handler);
3603 
3604         return 0;
3605 }
3606 EXPORT_SYMBOL_GPL(netdev_rx_handler_register);
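/* Hedged usage sketch (illustrative only, not from the original file):
 * a bridging/bonding-style upper device claims a port roughly like
 * this, with foo_handle_frame() and struct foo_port being hypothetical:
 *
 *      static rx_handler_result_t foo_handle_frame(struct sk_buff **pskb)
 *      {
 *              struct foo_port *port =
 *                      rcu_dereference((*pskb)->dev->rx_handler_data);
 *
 *              // consume, redirect, or pass the skb up unchanged
 *              return RX_HANDLER_PASS;
 *      }
 *
 *      rtnl_lock();
 *      err = netdev_rx_handler_register(port_dev, foo_handle_frame, port);
 *      rtnl_unlock();
 */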
3607 
3608 /**
3609  *      netdev_rx_handler_unregister - unregister receive handler
3610  *      @dev: device to unregister a handler from
3611  *
3612  *      Unregister a receive handler from a device.
3613  *
3614  *      The caller must hold the rtnl_mutex.
3615  */
3616 void netdev_rx_handler_unregister(struct net_device *dev)
3617 {
3618 
3619         ASSERT_RTNL();
3620         RCU_INIT_POINTER(dev->rx_handler, NULL);
3621         /* A reader seeing a non-NULL rx_handler in an rcu_read_lock()
3622          * section is guaranteed to see a non-NULL rx_handler_data
3623          * as well.
3624          */
3625         synchronize_net();
3626         RCU_INIT_POINTER(dev->rx_handler_data, NULL);
3627 }
3628 EXPORT_SYMBOL_GPL(netdev_rx_handler_unregister);
3629 
3630 /*
3631  * Limit the use of PFMEMALLOC reserves to those protocols that implement
3632  * the special handling of PFMEMALLOC skbs.
3633  */
3634 static bool skb_pfmemalloc_protocol(struct sk_buff *skb)
3635 {
3636         switch (skb->protocol) {
3637         case htons(ETH_P_ARP):
3638         case htons(ETH_P_IP):
3639         case htons(ETH_P_IPV6):
3640         case htons(ETH_P_8021Q):
3641         case htons(ETH_P_8021AD):
3642                 return true;
3643         default:
3644                 return false;
3645         }
3646 }
3647 
3648 static int __netif_receive_skb_core(struct sk_buff *skb, bool pfmemalloc)
3649 {
3650         struct packet_type *ptype, *pt_prev;
3651         rx_handler_func_t *rx_handler;
3652         struct net_device *orig_dev;
3653         bool deliver_exact = false;
3654         int ret = NET_RX_DROP;
3655         __be16 type;
3656 
3657         net_timestamp_check(!netdev_tstamp_prequeue, skb);
3658 
3659         trace_netif_receive_skb(skb);
3660 
3661         orig_dev = skb->dev;
3662 
3663         skb_reset_network_header(skb);
3664         if (!skb_transport_header_was_set(skb))
3665                 skb_reset_transport_header(skb);
3666         skb_reset_mac_len(skb);
3667 
3668         pt_prev = NULL;
3669 
3670         rcu_read_lock();
3671 
3672 another_round:
3673         skb->skb_iif = skb->dev->ifindex;
3674 
3675         __this_cpu_inc(softnet_data.processed);
3676 
3677         if (skb->protocol == cpu_to_be16(ETH_P_8021Q) ||
3678             skb->protocol == cpu_to_be16(ETH_P_8021AD)) {
3679                 skb = skb_vlan_untag(skb);
3680                 if (unlikely(!skb))
3681                         goto unlock;
3682         }
3683 
3684 #ifdef CONFIG_NET_CLS_ACT
3685         if (skb->tc_verd & TC_NCLS) {
3686                 skb->tc_verd = CLR_TC_NCLS(skb->tc_verd);
3687                 goto ncls;
3688         }
3689 #endif
3690 
3691         if (pfmemalloc)
3692                 goto skip_taps;
3693 
3694         list_for_each_entry_rcu(ptype, &ptype_all, list) {
3695                 if (pt_prev)
3696                         ret = deliver_skb(skb, pt_prev, orig_dev);
3697                 pt_prev = ptype;
3698         }
3699 
3700         list_for_each_entry_rcu(ptype, &skb->dev->ptype_all, list) {
3701                 if (pt_prev)
3702                         ret = deliver_skb(skb, pt_prev, orig_dev);
3703                 pt_prev = ptype;
3704         }
3705 
3706 skip_taps:
3707 #ifdef CONFIG_NET_CLS_ACT
3708         if (static_key_false(&ingress_needed)) {
3709                 skb = handle_ing(skb, &pt_prev, &ret, orig_dev);
3710                 if (!skb)
3711                         goto unlock;
3712         }
3713 
3714         skb->tc_verd = 0;
3715 ncls:
3716 #endif
3717         if (pfmemalloc && !skb_pfmemalloc_protocol(skb))
3718                 goto drop;
3719 
3720         if (skb_vlan_tag_present(skb)) {
3721                 if (pt_prev) {
3722                         ret = deliver_skb(skb, pt_prev, orig_dev);
3723                         pt_prev = NULL;
3724                 }
3725                 if (vlan_do_receive(&skb))
3726                         goto another_round;
3727                 else if (unlikely(!skb))
3728                         goto unlock;
3729         }
3730 
3731         rx_handler = rcu_dereference(skb->dev->rx_handler);
3732         if (rx_handler) {
3733                 if (pt_prev) {
3734                         ret = deliver_skb(skb, pt_prev, orig_dev);
3735                         pt_prev = NULL;
3736                 }
3737                 switch (rx_handler(&skb)) {
3738                 case RX_HANDLER_CONSUMED:
3739                         ret = NET_RX_SUCCESS;
3740                         goto unlock;
3741                 case RX_HANDLER_ANOTHER:
3742                         goto another_round;
3743                 case RX_HANDLER_EXACT:
3744                         deliver_exact = true;
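                        /* fall through - EXACT still takes the delivery path below */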
3745                 case RX_HANDLER_PASS:
3746                         break;
3747                 default:
3748                         BUG();
3749                 }
3750         }
3751 
3752         if (unlikely(skb_vlan_tag_present(skb))) {
3753                 if (skb_vlan_tag_get_id(skb))
3754                         skb->pkt_type = PACKET_OTHERHOST;
3755                 /* Note: we might in the future use prio bits
3756                  * and set skb->priority like in vlan_do_receive()
3757                  * For the time being, just ignore Priority Code Point
3758                  */
3759                 skb->vlan_tci = 0;
3760         }
3761 
3762         type = skb->protocol;
3763 
3764         /* deliver only exact match when indicated */
3765         if (likely(!deliver_exact)) {
3766                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3767                                        &ptype_base[ntohs(type) &
3768                                                    PTYPE_HASH_MASK]);
3769         }
3770 
3771         deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3772                                &orig_dev->ptype_specific);
3773 
3774         if (unlikely(skb->dev != orig_dev)) {
3775                 deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
3776                                        &skb->dev->ptype_specific);
3777         }
3778 
3779         if (pt_prev) {
3780                 if (unlikely(skb_orphan_frags(skb, GFP_ATOMIC)))
3781                         goto drop;
3782                 else
3783                         ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
3784         } else {
3785 drop:
3786                 atomic_long_inc(&skb->dev->rx_dropped);
3787                 kfree_skb(skb);
3788                 /* Jamal, now you will not be able to escape explaining
3789                  * to me how you were going to use this. :-)
3790                  */
3791                 ret = NET_RX_DROP;
3792         }
3793 
3794 unlock:
3795         rcu_read_unlock();
3796         return ret;
3797 }
3798 
3799 static int __netif_receive_skb(struct sk_buff *skb)
3800 {
3801         int ret;
3802 
3803         if (sk_memalloc_socks() && skb_pfmemalloc(skb)) {
3804                 unsigned long pflags = current->flags;
3805 
3806                 /*
3807                  * PFMEMALLOC skbs are special, they should
3808                  * - be delivered to SOCK_MEMALLOC sockets only
3809                  * - stay away from userspace
3810                  * - have bounded memory usage
3811                  *
3812                  * Use PF_MEMALLOC as this saves us from propagating the allocation
3813                  * context down to all allocation sites.
3814                  */
3815                 current->flags |= PF_MEMALLOC;
3816                 ret = __netif_receive_skb_core(skb, true);
3817                 tsk_restore_flags(current, pflags, PF_MEMALLOC);
3818         } else
3819                 ret = __netif_receive_skb_core(skb, false);
3820 
3821         return ret;
3822 }
3823 
3824 static int netif_receive_skb_internal(struct sk_buff *skb)
3825 {
3826         net_timestamp_check(netdev_tstamp_prequeue, skb);
3827 
3828         if (skb_defer_rx_timestamp(skb))
3829                 return NET_RX_SUCCESS;
3830 
3831 #ifdef CONFIG_RPS
3832         if (static_key_false(&rps_needed)) {
3833                 struct rps_dev_flow voidflow, *rflow = &voidflow;
3834                 int cpu, ret;
3835 
3836                 rcu_read_lock();
3837 
3838                 cpu = get_rps_cpu(skb->dev, skb, &rflow);
3839 
3840                 if (cpu >= 0) {
3841                         ret = enqueue_to_backlog(skb, cpu, &rflow->last_qtail);
3842                         rcu_read_unlock();
3843                         return ret;
3844                 }
3845                 rcu_read_unlock();
3846         }
3847 #endif
3848         return __netif_receive_skb(skb);
3849 }
3850 
3851 /**
3852  *      netif_receive_skb - process receive buffer from network
3853  *      @skb: buffer to process
3854  *
3855  *      netif_receive_skb() is the main receive data processing function.
3856  *      It always succeeds. The buffer may be dropped during processing
3857  *      for congestion control or by the protocol layers.
3858  *
3859  *      This function may only be called from softirq context and interrupts
3860  *      should be enabled.
3861  *
3862  *      Return values (usually ignored):
3863  *      NET_RX_SUCCESS: no congestion
3864  *      NET_RX_DROP: packet was dropped
3865  */
3866 int netif_receive_skb_sk(struct sock *sk, struct sk_buff *skb)
3867 {
3868         trace_netif_receive_skb_entry(skb);
3869 
3870         return netif_receive_skb_internal(skb);
3871 }
3872 EXPORT_SYMBOL(netif_receive_skb_sk);
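/* Hedged usage note (illustrative only, not part of the original file):
 * drivers normally call the plain netif_receive_skb() helper, which in
 * this tree is expected to be a thin wrapper in netdevice.h calling
 * netif_receive_skb_sk(skb->sk, skb).  A NAPI driver that does not use
 * GRO would invoke it from its poll routine roughly as:
 *
 *      skb->protocol = eth_type_trans(skb, netdev);
 *      netif_receive_skb(skb);         // softirq context, irqs enabled
 */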
3873 
3874 /* Network device is going away, flush any packets still pending.
3875  * Called with irqs disabled.
3876  */
3877 static void flush_backlog(void *arg)
3878 {
3879         struct net_device *dev = arg;
3880         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
3881         struct sk_buff *skb, *tmp;
3882 
3883         rps_lock(sd);
3884         skb_queue_walk_safe(&sd->input_pkt_queue, skb, tmp) {
3885                 if (skb->dev == dev) {
3886                         __skb_unlink(skb, &sd->input_pkt_queue);
3887                         kfree_skb(skb);
3888                         input_queue_head_incr(sd);
3889                 }
3890         }
3891         rps_unlock(sd);
3892 
3893         skb_queue_walk_safe(&sd->process_queue, skb, tmp) {
3894                 if (skb->dev == dev) {
3895                         __skb_unlink(skb, &sd->process_queue);
3896                         kfree_skb(skb);
3897                         input_queue_head_incr(sd);
3898                 }
3899         }
3900 }
3901 
3902 static int napi_gro_complete(struct sk_buff *skb)
3903 {
3904         struct packet_offload *ptype;
3905         __be16 type = skb->protocol;
3906         struct list_head *head = &offload_base;
3907         int err = -ENOENT;
3908 
3909         BUILD_BUG_ON(sizeof(struct napi_gro_cb) > sizeof(skb->cb));
3910 
3911         if (NAPI_GRO_CB(skb)->count == 1) {
3912                 skb_shinfo(skb)->gso_size = 0;
3913                 goto out;
3914         }
3915 
3916         rcu_read_lock();
3917         list_for_each_entry_rcu(ptype, head, list) {
3918                 if (ptype->type != type || !ptype->callbacks.gro_complete)
3919                         continue;
3920 
3921                 err = ptype->callbacks.gro_complete(skb, 0);
3922                 break;
3923         }
3924         rcu_read_unlock();
3925 
3926         if (err) {
3927                 WARN_ON(&ptype->list == head);
3928                 kfree_skb(skb);
3929                 return NET_RX_SUCCESS;
3930         }
3931 
3932 out:
3933         return netif_receive_skb_internal(skb);
3934 }
3935 
3936 /* napi->gro_list contains packets ordered by age, with the
3937  * youngest packets at the head of the list.
3938  * Complete skbs in reverse order to reduce latencies.
3939  */
3940 void napi_gro_flush(struct napi_struct *napi, bool flush_old)
3941 {
3942         struct sk_buff *skb, *prev = NULL;
3943 
3944         /* scan list and build reverse chain */
3945         for (skb = napi->gro_list; skb != NULL; skb = skb->next) {
3946                 skb->prev = prev;
3947                 prev = skb;
3948         }
3949 
3950         for (skb = prev; skb; skb = prev) {
3951                 skb->next = NULL;
3952 
3953                 if (flush_old && NAPI_GRO_CB(skb)->age == jiffies)
3954                         return;
3955 
3956                 prev = skb->prev;
3957                 napi_gro_complete(skb);
3958                 napi->gro_count--;
3959         }
3960 
3961         napi->gro_list = NULL;
3962 }
3963 EXPORT_SYMBOL(napi_gro_flush);
3964 
3965 static void gro_list_prepare(struct napi_struct *napi, struct sk_buff *skb)
3966 {
3967         struct sk_buff *p;
3968         unsigned int maclen = skb->dev->hard_header_len;
3969         u32 hash = skb_get_hash_raw(skb);
3970 
3971         for (p = napi->gro_list; p; p = p->next) {
3972                 unsigned long diffs;
3973 
3974                 NAPI_GRO_CB(p)->flush = 0;
3975 
3976                 if (hash != skb_get_hash_raw(p)) {
3977                         NAPI_GRO_CB(p)->same_flow = 0;
3978                         continue;
3979                 }
3980 
3981                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
3982                 diffs |= p->vlan_tci ^ skb->vlan_tci;
3983                 if (maclen == ETH_HLEN)
3984                         diffs |= compare_ether_header(skb_mac_header(p),
3985                                                       skb_mac_header(skb));
3986                 else if (!diffs)
3987                         diffs = memcmp(skb_mac_header(p),
3988                                        skb_mac_header(skb),
3989                                        maclen);
3990                 NAPI_GRO_CB(p)->same_flow = !diffs;
3991         }
3992 }
3993 
3994 static void skb_gro_reset_offset(struct sk_buff *skb)
3995 {
3996         const struct skb_shared_info *pinfo = skb_shinfo(skb);
3997         const skb_frag_t *frag0 = &pinfo->frags[0];
3998 
3999         NAPI_GRO_CB(skb)->data_offset = 0;
4000         NAPI_GRO_CB(skb)->frag0 = NULL;
4001         NAPI_GRO_CB(skb)->frag0_len = 0;
4002 
4003         if (skb_mac_header(skb) == skb_tail_pointer(skb) &&
4004             pinfo->nr_frags &&
4005             !PageHighMem(skb_frag_page(frag0))) {
4006                 NAPI_GRO_CB(skb)->frag0 = skb_frag_address(frag0);
4007                 NAPI_GRO_CB(skb)->frag0_len = skb_frag_size(frag0);
4008         }
4009 }
4010 
4011 static void gro_pull_from_frag0(struct sk_buff *skb, int grow)
4012 {
4013         struct skb_shared_info *pinfo = skb_shinfo(skb);
4014 
4015         BUG_ON(skb->end - skb->tail < grow);
4016 
4017         memcpy(skb_tail_pointer(skb), NAPI_GRO_CB(skb)->frag0, grow);
4018 
4019         skb->data_len -= grow;
4020         skb->tail += grow;
4021 
4022         pinfo->frags[0].page_offset += grow;
4023         skb_frag_size_sub(&pinfo->frags[0], grow);
4024 
4025         if (unlikely(!skb_frag_size(&pinfo->frags[0]))) {
4026                 skb_frag_unref(skb, 0);
4027                 memmove(pinfo->frags, pinfo->frags + 1,
4028                         --pinfo->nr_frags * sizeof(pinfo->frags[0]));
4029         }
4030 }
4031 
4032 static enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4033 {
4034         struct sk_buff **pp = NULL;
4035         struct packet_offload *ptype;
4036         __be16 type = skb->protocol;
4037         struct list_head *head = &offload_base;
4038         int same_flow;
4039         enum gro_result ret;
4040         int grow;
4041 
4042         if (!(skb->dev->features & NETIF_F_GRO))
4043                 goto normal;
4044 
4045         if (skb_is_gso(skb) || skb_has_frag_list(skb) || skb->csum_bad)
4046                 goto normal;
4047 
4048         gro_list_prepare(napi, skb);
4049 
4050         rcu_read_lock();
4051         list_for_each_entry_rcu(ptype, head, list) {
4052                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4053                         continue;
4054 
4055                 skb_set_network_header(skb, skb_gro_offset(skb));
4056                 skb_reset_mac_len(skb);
4057                 NAPI_GRO_CB(skb)->same_flow = 0;
4058                 NAPI_GRO_CB(skb)->flush = 0;
4059                 NAPI_GRO_CB(skb)->free = 0;
4060                 NAPI_GRO_CB(skb)->udp_mark = 0;
4061                 NAPI_GRO_CB(skb)->gro_remcsum_start = 0;
4062 
4063                 /* Setup for GRO checksum validation */
4064                 switch (skb->ip_summed) {
4065                 case CHECKSUM_COMPLETE:
4066                         NAPI_GRO_CB(skb)->csum = skb->csum;
4067                         NAPI_GRO_CB(skb)->csum_valid = 1;
4068                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4069                         break;
4070                 case CHECKSUM_UNNECESSARY:
4071                         NAPI_GRO_CB(skb)->csum_cnt = skb->csum_level + 1;
4072                         NAPI_GRO_CB(skb)->csum_valid = 0;
4073                         break;
4074                 default:
4075                         NAPI_GRO_CB(skb)->csum_cnt = 0;
4076                         NAPI_GRO_CB(skb)->csum_valid = 0;
4077                 }
4078 
4079                 pp = ptype->callbacks.gro_receive(&napi->gro_list, skb);
4080                 break;
4081         }
4082         rcu_read_unlock();
4083 
4084         if (&ptype->list == head)
4085                 goto normal;
4086 
4087         same_flow = NAPI_GRO_CB(skb)->same_flow;
4088         ret = NAPI_GRO_CB(skb)->free ? GRO_MERGED_FREE : GRO_MERGED;
4089 
4090         if (pp) {
4091                 struct sk_buff *nskb = *pp;
4092 
4093                 *pp = nskb->next;
4094                 nskb->next = NULL;
4095                 napi_gro_complete(nskb);
4096                 napi->gro_count--;
4097         }
4098 
4099         if (same_flow)
4100                 goto ok;
4101 
4102         if (NAPI_GRO_CB(skb)->flush)
4103                 goto normal;
4104 
4105         if (unlikely(napi->gro_count >= MAX_GRO_SKBS)) {
4106                 struct sk_buff *nskb = napi->gro_list;
4107 
4108                 /* locate the end of the list to select the 'oldest' flow */
4109                 while (nskb->next) {
4110                         pp = &nskb->next;
4111                         nskb = *pp;
4112                 }
4113                 *pp = NULL;
4114                 nskb->next = NULL;
4115                 napi_gro_complete(nskb);
4116         } else {
4117                 napi->gro_count++;
4118         }
4119         NAPI_GRO_CB(skb)->count = 1;
4120         NAPI_GRO_CB(skb)->age = jiffies;
4121         NAPI_GRO_CB(skb)->last = skb;
4122         skb_shinfo(skb)->gso_size = skb_gro_len(skb);
4123         skb->next = napi->gro_list;
4124         napi->gro_list = skb;
4125         ret = GRO_HELD;
4126 
4127 pull:
4128         grow = skb_gro_offset(skb) - skb_headlen(skb);
4129         if (grow > 0)
4130                 gro_pull_from_frag0(skb, grow);
4131 ok:
4132         return ret;
4133 
4134 normal:
4135         ret = GRO_NORMAL;
4136         goto pull;
4137 }
4138 
4139 struct packet_offload *gro_find_receive_by_type(__be16 type)
4140 {
4141         struct list_head *offload_head = &offload_base;
4142         struct packet_offload *ptype;
4143 
4144         list_for_each_entry_rcu(ptype, offload_head, list) {
4145                 if (ptype->type != type || !ptype->callbacks.gro_receive)
4146                         continue;
4147                 return ptype;
4148         }
4149         return NULL;
4150 }
4151 EXPORT_SYMBOL(gro_find_receive_by_type);
4152 
4153 struct packet_offload *gro_find_complete_by_type(__be16 type)
4154 {
4155         struct list_head *offload_head = &offload_base;
4156         struct packet_offload *ptype;
4157 
4158         list_for_each_entry_rcu(ptype, offload_head, list) {
4159                 if (ptype->type != type || !ptype->callbacks.gro_complete)
4160                         continue;
4161                 return ptype;
4162         }
4163         return NULL;
4164 }
4165 EXPORT_SYMBOL(gro_find_complete_by_type);
4166 
4167 static gro_result_t napi_skb_finish(gro_result_t ret, struct sk_buff *skb)
4168 {
4169         switch (ret) {
4170         case GRO_NORMAL:
4171                 if (netif_receive_skb_internal(skb))
4172                         ret = GRO_DROP;
4173                 break;
4174 
4175         case GRO_DROP:
4176                 kfree_skb(skb);
4177                 break;
4178 
4179         case GRO_MERGED_FREE:
4180                 if (NAPI_GRO_CB(skb)->free == NAPI_GRO_FREE_STOLEN_HEAD)
4181                         kmem_cache_free(skbuff_head_cache, skb);
4182                 else
4183                         __kfree_skb(skb);
4184                 break;
4185 
4186         case GRO_HELD:
4187         case GRO_MERGED:
4188                 break;
4189         }
4190 
4191         return ret;
4192 }
4193 
4194 gro_result_t napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
4195 {
4196         trace_napi_gro_receive_entry(skb);
4197 
4198         skb_gro_reset_offset(skb);
4199 
4200         return napi_skb_finish(dev_gro_receive(napi, skb), skb);
4201 }
4202 EXPORT_SYMBOL(napi_gro_receive);
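/* Hedged usage sketch (illustrative only, not from the original file):
 * a NAPI driver with GRO enabled feeds completed frames from its poll
 * loop like this; foo_build_skb() and the ring structure are
 * hypothetical:
 *
 *      while (work < budget && (skb = foo_build_skb(ring))) {
 *              skb->protocol = eth_type_trans(skb, ring->netdev);
 *              napi_gro_receive(&ring->napi, skb);
 *              work++;
 *      }
 */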
4203 
4204 static void napi_reuse_skb(struct napi_struct *napi, struct sk_buff *skb)
4205 {
4206         if (unlikely(skb->pfmemalloc)) {
4207                 consume_skb(skb);
4208                 return;
4209         }
4210         __skb_pull(skb, skb_headlen(skb));
4211         /* restore the reserve we had after netdev_alloc_skb_ip_align() */
4212         skb_reserve(skb, NET_SKB_PAD + NET_IP_ALIGN - skb_headroom(skb));
4213         skb->vlan_tci = 0;
4214         skb->dev = napi->dev;
4215         skb->skb_iif = 0;
4216         skb->encapsulation = 0;
4217         skb_shinfo(skb)->gso_type = 0;
4218         skb->truesize = SKB_TRUESIZE(skb_end_offset(skb));
4219 
4220         napi->skb = skb;
4221 }
4222 
4223 struct sk_buff *napi_get_frags(struct napi_struct *napi)
4224 {
4225         struct sk_buff *skb = napi->skb;
4226 
4227         if (!skb) {
4228                 skb = napi_alloc_skb(napi, GRO_MAX_HEAD);
4229                 napi->skb = skb;
4230         }
4231         return skb;
4232 }
4233 EXPORT_SYMBOL(napi_get_frags);
4234 
4235 static gro_result_t napi_frags_finish(struct napi_struct *napi,
4236                                       struct sk_buff *skb,
4237                                       gro_result_t ret)
4238 {
4239         switch (ret) {
4240         case GRO_NORMAL:
4241         case GRO_HELD:
4242                 __skb_push(skb, ETH_HLEN);
4243                 skb->protocol = eth_type_trans(skb, skb->dev);
4244                 if (ret == GRO_NORMAL && netif_receive_skb_internal(skb))
4245                         ret = GRO_DROP;
4246                 break;
4247 
4248         case GRO_DROP:
4249         case GRO_MERGED_FREE:
4250                 napi_reuse_skb(napi, skb);
4251                 break;
4252 
4253         case GRO_MERGED:
4254                 break;
4255         }
4256 
4257         return ret;
4258 }
4259 
4260 /* The upper GRO stack assumes the network header starts at gro_offset=0.
4261  * Drivers could call both napi_gro_frags() and napi_gro_receive(), so
4262  * we copy the Ethernet header into skb->data to have a common layout.
4263  */
4264 static struct sk_buff *napi_frags_skb(struct napi_struct *napi)
4265 {
4266         struct sk_buff *skb = napi->skb;
4267         const struct ethhdr *eth;
4268         unsigned int hlen = sizeof(*eth);
4269 
4270         napi->skb = NULL;
4271 
4272         skb_reset_mac_header(skb);
4273         skb_gro_reset_offset(skb);
4274 
4275         eth = skb_gro_header_fast(skb, 0);
4276         if (unlikely(skb_gro_header_hard(skb, hlen))) {
4277                 eth = skb_gro_header_slow(skb, hlen, 0);
4278                 if (unlikely(!eth)) {
4279                         napi_reuse_skb(napi, skb);
4280                         return NULL;
4281                 }
4282         } else {
4283                 gro_pull_from_frag0(skb, hlen);
4284                 NAPI_GRO_CB(skb)->frag0 += hlen;
4285                 NAPI_GRO_CB(skb)->frag0_len -= hlen;
4286         }
4287         __skb_pull(skb, hlen);
4288 
4289         /*
4290          * This works because the only protocols we care about don't require
4291          * special handling.
4292          * We'll fix it up properly in napi_frags_finish()
4293          */
4294         skb->protocol = eth->h_proto;
4295 
4296         return skb;
4297 }
4298 
4299 gro_result_t napi_gro_frags(struct napi_struct *napi)
4300 {
4301         struct sk_buff *skb = napi_frags_skb(napi);
4302 
4303         if (!skb)
4304                 return GRO_DROP;
4305 
4306         trace_napi_gro_frags_entry(skb);
4307 
4308         return napi_frags_finish(napi, skb, dev_gro_receive(napi, skb));
4309 }
4310 EXPORT_SYMBOL(napi_gro_frags);
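/* Hedged usage sketch (illustrative only, not from the original file):
 * drivers that receive into page fragments can skip building a linear
 * skb themselves: get the per-NAPI skb, attach the pages, and let GRO
 * pull the headers.  foo_rx_page() and foo_rx_len() are hypothetical:
 *
 *      skb = napi_get_frags(napi);
 *      if (!skb)
 *              return;                 // drop: allocation failed
 *      skb_fill_page_desc(skb, 0, foo_rx_page(ring),
 *                         0, foo_rx_len(ring));
 *      skb->len += foo_rx_len(ring);
 *      skb->data_len += foo_rx_len(ring);
 *      skb->truesize += PAGE_SIZE;
 *      napi_gro_frags(napi);           // consumes or recycles the skb
 */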
4311 
4312 /* Compute the checksum from gro_offset and return the folded value
4313  * after adding in any pseudo checksum.
4314  */
4315 __sum16 __skb_gro_checksum_complete(struct sk_buff *skb)
4316 {
4317         __wsum wsum;
4318         __sum16 sum;
4319 
4320         wsum = skb_checksum(skb, skb_gro_offset(skb), skb_gro_len(skb), 0);
4321 
4322         /* NAPI_GRO_CB(skb)->csum holds pseudo checksum */
4323         sum = csum_fold(csum_add(NAPI_GRO_CB(skb)->csum, wsum));
4324         if (likely(!sum)) {
4325                 if (unlikely(skb->ip_summed == CHECKSUM_COMPLETE) &&
4326                     !skb->csum_complete_sw)
4327                         netdev_rx_csum_fault(skb->dev);
4328         }
4329 
4330         NAPI_GRO_CB(skb)->csum = wsum;
4331         NAPI_GRO_CB(skb)->csum_valid = 1;
4332 
4333         return sum;
4334 }
4335 EXPORT_SYMBOL(__skb_gro_checksum_complete);
4336 
4337 /*
4338  * net_rps_action_and_irq_enable sends any pending IPIs for RPS.
4339  * Note: called with local irq disabled, but exits with local irq enabled.
4340  */
4341 static void net_rps_action_and_irq_enable(struct softnet_data *sd)
4342 {
4343 #ifdef CONFIG_RPS
4344         struct softnet_data *remsd = sd->rps_ipi_list;
4345 
4346         if (remsd) {
4347                 sd->rps_ipi_list = NULL;
4348 
4349                 local_irq_enable();
4350 
4351                 /* Send pending IPIs to kick RPS processing on remote CPUs. */
4352                 while (remsd) {
4353                         struct softnet_data *next = remsd->rps_ipi_next;
4354 
4355                         if (cpu_online(remsd->cpu))
4356                                 smp_call_function_single_async(remsd->cpu,
4357                                                            &remsd->csd);
4358                         remsd = next;
4359                 }
4360         } else
4361 #endif
4362                 local_irq_enable();
4363 }
4364 
4365 static bool sd_has_rps_ipi_waiting(struct softnet_data *sd)
4366 {
4367 #ifdef CONFIG_RPS
4368         return sd->rps_ipi_list != NULL;
4369 #else
4370         return false;
4371 #endif
4372 }
4373 
4374 static int process_backlog(struct napi_struct *napi, int quota)
4375 {
4376         int work = 0;
4377         struct softnet_data *sd = container_of(napi, struct softnet_data, backlog);
4378 
4379         /* Check if we have pending IPIs; it is better to send them now
4380          * rather than waiting for net_rx_action() to end.
4381          */
4382         if (sd_has_rps_ipi_waiting(sd)) {
4383                 local_irq_disable();
4384                 net_rps_action_and_irq_enable(sd);
4385         }
4386 
4387         napi->weight = weight_p;
4388         local_irq_disable();
4389         while (1) {
4390                 struct sk_buff *skb;
4391 
4392                 while ((skb = __skb_dequeue(&sd->process_queue))) {
4393                         local_irq_enable();
4394                         __netif_receive_skb(skb);
4395                         local_irq_disable();
4396                         input_queue_head_incr(sd);
4397                         if (++work >= quota) {
4398                                 local_irq_enable();
4399                                 return work;
4400                         }
4401                 }
4402 
4403                 rps_lock(sd);
4404                 if (skb_queue_empty(&sd->input_pkt_queue)) {
4405                         /*
4406                          * Inline a custom version of __napi_complete().
4407                          * Only the current cpu owns and manipulates this napi,
4408                          * and NAPI_STATE_SCHED is the only possible flag set
4409                          * on backlog.
4410                          * We can use a plain write instead of clear_bit(),
4411                          * and we don't need an smp_mb() memory barrier.
4412                          */
4413                         napi->state = 0;
4414                         rps_unlock(sd);
4415 
4416                         break;
4417                 }
4418 
4419                 skb_queue_splice_tail_init(&sd->input_pkt_queue,
4420                                            &sd->process_queue);
4421                 rps_unlock(sd);
4422         }
4423         local_irq_enable();
4424 
4425         return work;
4426 }
4427 
4428 /**
4429  * __napi_schedule - schedule for receive
4430  * @n: entry to schedule
4431  *
4432  * The entry's receive function will be scheduled to run.
4433  * Consider using __napi_schedule_irqoff() if hard irqs are masked.
4434  */
4435 void __napi_schedule(struct napi_struct *n)
4436 {
4437         unsigned long flags;
4438 
4439         local_irq_save(flags);
4440         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4441         local_irq_restore(flags);
4442 }
4443 EXPORT_SYMBOL(__napi_schedule);
4444 
4445 /**
4446  * __napi_schedule_irqoff - schedule for receive
4447  * @n: entry to schedule
4448  *
4449  * Variant of __napi_schedule() assuming hard irqs are masked
4450  */
4451 void __napi_schedule_irqoff(struct napi_struct *n)
4452 {
4453         ____napi_schedule(this_cpu_ptr(&softnet_data), n);
4454 }
4455 EXPORT_SYMBOL(__napi_schedule_irqoff);
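/* Hedged usage sketch (illustrative only, not from the original file):
 * a device interrupt handler, where hard irqs are already masked,
 * typically hands work to NAPI like this; foo_mask_irq() and struct
 * foo_priv are hypothetical:
 *
 *      static irqreturn_t foo_isr(int irq, void *data)
 *      {
 *              struct foo_priv *priv = data;
 *
 *              foo_mask_irq(priv);             // quiesce the device
 *              if (napi_schedule_prep(&priv->napi))
 *                      __napi_schedule_irqoff(&priv->napi);
 *              return IRQ_HANDLED;
 *      }
 */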
4456 
4457 void __napi_complete(struct napi_struct *n)
4458 {
4459         BUG_ON(!test_bit(NAPI_STATE_SCHED, &n->state));
4460 
4461         list_del_init(&n->poll_list);
4462         smp_mb__before_atomic();
4463         clear_bit(NAPI_STATE_SCHED, &n->state);
4464 }
4465 EXPORT_SYMBOL(__napi_complete);
4466 
4467 void napi_complete_done(struct napi_struct *n, int work_done)
4468 {
4469         unsigned long flags;
4470 
4471         /*
4472          * Don't let napi dequeue from the cpu poll list
4473          * just in case it's running on a different cpu.
4474          */
4475         if (unlikely(test_bit(NAPI_STATE_NPSVC, &n->state)))
4476                 return;
4477 
4478         if (n->gro_list) {
4479                 unsigned long timeout = 0;
4480 
4481                 if (work_done)
4482                         timeout = n->dev->gro_flush_timeout;
4483 
4484                 if (timeout)
4485                         hrtimer_start(&n->timer, ns_to_ktime(timeout),
4486                                       HRTIMER_MODE_REL_PINNED);
4487                 else
4488                         napi_gro_flush(n, false);
4489         }
4490         if (likely(list_empty(&n->poll_list))) {
4491                 WARN_ON_ONCE(!test_and_clear_bit(NAPI_STATE_SCHED, &n->state));
4492         } else {
4493                 /* If n->poll_list is not empty, we need to mask irqs */
4494                 local_irq_save(flags);
4495                 __napi_complete(n);
4496                 local_irq_restore(flags);
4497         }
4498 }
4499 EXPORT_SYMBOL(napi_complete_done);
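/* Hedged usage sketch (illustrative only, not from the original file):
 * the canonical driver poll routine pairs the budget accounting above
 * with napi_complete_done(); foo_clean_rx() and foo_enable_irq() are
 * hypothetical:
 *
 *      static int foo_poll(struct napi_struct *napi, int budget)
 *      {
 *              int work = foo_clean_rx(napi, budget);
 *
 *              if (work < budget) {
 *                      napi_complete_done(napi, work);
 *                      foo_enable_irq(napi);   // re-arm device interrupt
 *              }
 *              return work;
 *      }
 */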
4500 
4501 /* must be called under rcu_read_lock(), as we dont take a reference */
4502 struct napi_struct *napi_by_id(unsigned int napi_id)
4503 {
4504         unsigned int hash = napi_id % HASH_SIZE(napi_hash);
4505         struct napi_struct *napi;
4506 
4507         hlist_for_each_entry_rcu(napi, &napi_hash[hash], napi_hash_node)
4508                 if (napi->napi_id == napi_id)
4509                         return napi;
4510 
4511         return NULL;
4512 }
4513 EXPORT_SYMBOL_GPL(napi_by_id);
4514 
4515 void napi_hash_add(struct napi_struct *napi)
4516 {
4517         if (!test_and_set_bit(NAPI_STATE_HASHED, &napi->state)) {
4518 
4519                 spin_lock(&napi_hash_lock);
4520 
4521                 /* 0 is not a valid id, and we also skip an id that is taken;
4522                  * we expect both events to be extremely rare.
4523                  */
4524                 napi->napi_id = 0;
4525                 while (!napi->napi_id) {
4526                         napi->napi_id = ++napi_gen_id;
4527                         if (napi_by_id(napi->napi_id))
4528                                 napi->napi_id = 0;
4529                 }
4530 
4531                 hlist_add_head_rcu(&napi->napi_hash_node,
4532                         &napi_hash[napi->napi_id % HASH_SIZE(napi_hash)]);
4533 
4534                 spin_unlock(&napi_hash_lock);
4535         }
4536 }
4537 EXPORT_SYMBOL_GPL(napi_hash_add);
4538 
4539 /* Warning: the caller is responsible for making sure an RCU grace period
4540  * has elapsed before freeing the memory containing @napi.
4541  */
4542 void napi_hash_del(struct napi_struct *napi)
4543 {
4544         spin_lock(&napi_hash_lock);
4545 
4546         if (test_and_clear_bit(NAPI_STATE_HASHED, &napi->state))
4547                 hlist_del_rcu(&napi->napi_hash_node);
4548 
4549         spin_unlock(&napi_hash_lock);
4550 }
4551 EXPORT_SYMBOL_GPL(napi_hash_del);
4552 
4553 static enum hrtimer_restart napi_watchdog(struct hrtimer *timer)
4554 {
4555         struct napi_struct *napi;
4556 
4557         napi = container_of(timer, struct napi_struct, timer);
4558         if (napi->gro_list)
4559                 napi_schedule(napi);
4560 
4561         return HRTIMER_NORESTART;
4562 }
4563 
4564 void netif_napi_add(struct net_device *dev, struct napi_struct *napi,
4565                     int (*poll)(struct napi_struct *, int), int weight)
4566 {
4567         INIT_LIST_HEAD(&napi->poll_list);
4568         hrtimer_init(&napi->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
4569         napi->timer.function = napi_watchdog;
4570         napi->gro_count = 0;
4571         napi->gro_list = NULL;
4572         napi->skb = NULL;
4573         napi->poll = poll;
4574         if (weight > NAPI_POLL_WEIGHT)
4575                 pr_err_once("netif_napi_add() called with weight %d on device %s\n",
4576                             weight, dev->name);
4577         napi->weight = weight;
4578         list_add(&napi->dev_list, &dev->napi_list);
4579         napi->dev = dev;
4580 #ifdef CONFIG_NETPOLL
4581         spin_lock_init(&napi->poll_lock);
4582         napi->poll_owner = -1;
4583 #endif
4584         set_bit(NAPI_STATE_SCHED, &napi->state);
4585 }
4586 EXPORT_SYMBOL(netif_napi_add);
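/* Hedged usage sketch (illustrative only, not from the original file):
 * registration normally happens at probe/open time and is undone in
 * reverse order on teardown; foo_poll() is the hypothetical poll
 * routine from the sketch above:
 *
 *      netif_napi_add(netdev, &priv->napi, foo_poll, NAPI_POLL_WEIGHT);
 *      napi_enable(&priv->napi);       // in ndo_open
 *      ...
 *      napi_disable(&priv->napi);      // in ndo_stop
 *      netif_napi_del(&priv->napi);    // before free_netdev()
 */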
4587 
4588 void napi_disable(struct napi_struct *n)
4589 {
4590         might_sleep();
4591         set_bit(NAPI_STATE_DISABLE, &n->state);
4592 
4593         while (test_and_set_bit(NAPI_STATE_SCHED, &n->state))
4594                 msleep(1);
4595 
4596         hrtimer_cancel(&n->timer);
4597 
4598         clear_bit(NAPI_STATE_DISABLE, &n->state);
4599 }
4600 EXPORT_SYMBOL(napi_disable);
4601 
4602 void netif_napi_del(struct napi_struct *napi)
4603 {
4604         list_del_init(&napi->dev_list);
4605         napi_free_frags(napi);
4606 
4607         kfree_skb_list(napi->gro_list);
4608         napi->gro_list = NULL;
4609         napi->gro_count = 0;
4610 }
4611 EXPORT_SYMBOL(netif_napi_del);
4612 
4613 static int napi_poll(struct napi_struct *n, struct list_head *repoll)
4614 {
4615         void *have;
4616         int work, weight;
4617 
4618         list_del_init(&n->poll_list);
4619 
4620         have = netpoll_poll_lock(n);
4621 
4622         weight = n->weight;
4623 
4624         /* This NAPI_STATE_SCHED test is for avoiding a race
4625          * with netpoll's poll_napi().  Only the entity which
4626          * obtains the lock and sees NAPI_STATE_SCHED set will
4627          * actually make the ->poll() call.  Therefore we avoid
4628          * accidentally calling ->poll() when NAPI is not scheduled.
4629          */
4630         work = 0;
4631         if (test_bit(NAPI_STATE_SCHED, &n->state)) {
4632                 work = n->poll(n, weight);
4633                 trace_napi_poll(n);
4634         }
4635 
4636         WARN_ON_ONCE(work > weight);
4637 
4638         if (likely(work < weight))
4639                 goto out_unlock;
4640 
4641         /* Drivers must not modify the NAPI state if they
4642          * consume the entire weight.  In such cases this code
4643          * still "owns" the NAPI instance and therefore can
4644          * move the instance around on the list at-will.
4645          */
4646         if (unlikely(napi_disable_pending(n))) {
4647                 napi_complete(n);
4648                 goto out_unlock;
4649         }
4650 
4651         if (n->gro_list) {
4652                 /* Flush packets that are too old.
4653                  * If HZ < 1000, flush all packets.
4654                  */
4655                 napi_gro_flush(n, HZ >= 1000);
4656         }
4657 
4658         /* Some drivers may have called napi_schedule
4659          * prior to exhausting their budget.
4660          */
4661         if (unlikely(!list_empty(&n->poll_list))) {
4662                 pr_warn_once("%s: Budget exhausted after napi rescheduled\n",
4663                              n->dev ? n->dev->name : "backlog");
4664                 goto out_unlock;
4665         }
4666 
4667         list_add_tail(&n->poll_list, repoll);
4668 
4669 out_unlock:
4670         netpoll_poll_unlock(have);
4671 
4672         return work;
4673 }
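
/* Editorial sketch (not part of this file) of a poll callback that honours
 * the contract enforced above: never report more work than the budget, and
 * only call napi_complete() when less than the budget was consumed.
 * foo_priv and foo_clean_rx() are assumed names; foo_clean_rx() is taken to
 * process at most @budget packets and return the number it handled.
 *
 *      static int foo_poll(struct napi_struct *napi, int budget)
 *      {
 *              struct foo_priv *priv = container_of(napi, struct foo_priv,
 *                                                   napi);
 *              int work = foo_clean_rx(priv, budget);
 *
 *              if (work < budget)
 *                      napi_complete(napi);
 *              return work;
 *      }
 */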
4674 
4675 static void net_rx_action(struct softirq_action *h)
4676 {
4677         struct softnet_data *sd = this_cpu_ptr(&softnet_data);
4678         unsigned long time_limit = jiffies + 2;
4679         int budget = netdev_budget;
4680         LIST_HEAD(list);
4681         LIST_HEAD(repoll);
4682 
4683         local_irq_disable();
4684         list_splice_init(&sd->poll_list, &list);
4685         local_irq_enable();
4686 
4687         for (;;) {
4688                 struct napi_struct *n;
4689 
4690                 if (list_empty(&list)) {
4691                         if (!sd_has_rps_ipi_waiting(sd) && list_empty(&repoll))
4692                                 return;
4693                         break;
4694                 }
4695 
4696                 n = list_first_entry(&list, struct napi_struct, poll_list);
4697                 budget -= napi_poll(n, &repoll);
4698 
4699                 /* If the softirq window is exhausted then punt.
4700                  * Allow this to run for 2 jiffies, which allows an
4701                  * average latency of 1.5/HZ.
4702                  */
4703                 if (unlikely(budget <= 0 ||
4704                              time_after_eq(jiffies, time_limit))) {
4705                         sd->time_squeeze++;
4706                         break;
4707                 }
4708         }
4709 
4710         local_irq_disable();
4711 
4712         list_splice_tail_init(&sd->poll_list, &list);
4713         list_splice_tail(&repoll, &list);
4714         list_splice(&list, &sd->poll_list);
4715         if (!list_empty(&sd->poll_list))
4716                 __raise_softirq_irqoff(NET_RX_SOFTIRQ);
4717 
4718         net_rps_action_and_irq_enable(sd);
4719 }
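
/* Editorial note: netdev_budget above is set by the net.core.netdev_budget
 * sysctl (/proc/sys/net/core/netdev_budget), so the per-invocation packet
 * budget of net_rx_action() can be tuned from userspace.
 */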
4720 
4721 struct netdev_adjacent {
4722         struct net_device *dev;
4723 
4724         /* upper master flag, there can only be one master device per list */
4725         bool master;
4726 
4727         /* counter for the number of times this device was added to us */
4728         u16 ref_nr;
4729 
4730         /* private field for the users */
4731         void *private;
4732 
4733         struct list_head list;
4734         struct rcu_head rcu;
4735 };
4736 
4737 static struct netdev_adjacent *__netdev_find_adj(struct net_device *dev,
4738                                                  struct net_device *adj_dev,
4739                                                  struct list_head *adj_list)
4740 {
4741         struct netdev_adjacent *adj;
4742 
4743         list_for_each_entry(adj, adj_list, list) {
4744                 if (adj->dev == adj_dev)
4745                         return adj;
4746         }
4747         return NULL;
4748 }
4749 
4750 /**
4751  * netdev_has_upper_dev - Check if device is linked to an upper device
4752  * @dev: device
4753  * @upper_dev: upper device to check
4754  *
4755  * Find out if a device is linked to the specified upper device and return
4756  * true in case it is. Note that this walks the whole chain of upper
4757  * devices, not only the immediate upper device. The caller must hold the RTNL lock.
4758  */
4759 bool netdev_has_upper_dev(struct net_device *dev,
4760                           struct net_device *upper_dev)
4761 {
4762         ASSERT_RTNL();
4763 
4764         return __netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper);
4765 }
4766 EXPORT_SYMBOL(netdev_has_upper_dev);
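
/* Editorial usage sketch (not part of this file): callers hold RTNL, e.g. a
 * stacking driver checking whether dev already sits below master_dev before
 * trying to link them; master_dev is an assumed name.
 *
 *      ASSERT_RTNL();
 *      if (netdev_has_upper_dev(dev, master_dev))
 *              return -EBUSY;
 */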
4767 
4768 /**
4769  * netdev_has_any_upper_dev - Check if device is linked to some device
4770  * @dev: device
4771  *
4772  * Find out if a device is linked to an upper device and return true in case
4773  * it is. The caller must hold the RTNL lock.
4774  */
4775 static bool netdev_has_any_upper_dev(struct net_device *dev)
4776 {
4777         ASSERT_RTNL();
4778 
4779         return !list_empty(&dev->all_adj_list.upper);
4780 }
4781 
4782 /**
4783  * netdev_master_upper_dev_get - Get master upper device
4784  * @dev: device
4785  *
4786  * Find a master upper device and return pointer to it or NULL in case
4787  * it's not there. The caller must hold the RTNL lock.
4788  */
4789 struct net_device *netdev_master_upper_dev_get(struct net_device *dev)
4790 {
4791         struct netdev_adjacent *upper;
4792 
4793         ASSERT_RTNL();
4794 
4795         if (list_empty(&dev->adj_list.upper))
4796                 return NULL;
4797 
4798         upper = list_first_entry(&dev->adj_list.upper,
4799                                  struct netdev_adjacent, list);
4800         if (likely(upper->master))
4801                 return upper->dev;
4802         return NULL;
4803 }
4804 EXPORT_SYMBOL(netdev_master_upper_dev_get);
4805 
4806 void *netdev_adjacent_get_private(struct list_head *adj_list)
4807 {
4808         struct netdev_adjacent *adj;
4809 
4810         adj = list_entry(adj_list, struct netdev_adjacent, list);
4811 
4812         return adj->private;
4813 }
4814 EXPORT_SYMBOL(netdev_adjacent_get_private);
4815 
4816 /**
4817  * netdev_upper_get_next_dev_rcu - Get the next dev from upper list
4818  * @dev: device
4819  * @iter: list_head ** of the current position
4820  *
4821  * Gets the next device from the dev's upper list, starting from iter
4822  * position. The caller must hold RCU read lock.
4823  */
4824 struct net_device *netdev_upper_get_next_dev_rcu(struct net_device *dev,
4825                                                  struct list_head **iter)
4826 {
4827         struct netdev_adjacent *upper;
4828 
4829         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4830 
4831         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4832 
4833         if (&upper->list == &dev->adj_list.upper)
4834                 return NULL;
4835 
4836         *iter = &upper->list;
4837 
4838         return upper->dev;
4839 }
4840 EXPORT_SYMBOL(netdev_upper_get_next_dev_rcu);
4841 
4842 /**
4843  * netdev_all_upper_get_next_dev_rcu - Get the next dev from upper list
4844  * @dev: device
4845  * @iter: list_head ** of the current position
4846  *
4847  * Gets the next device from the dev's upper list, starting from iter
4848  * position. The caller must hold RCU read lock.
4849  */
4850 struct net_device *netdev_all_upper_get_next_dev_rcu(struct net_device *dev,
4851                                                      struct list_head **iter)
4852 {
4853         struct netdev_adjacent *upper;
4854 
4855         WARN_ON_ONCE(!rcu_read_lock_held() && !lockdep_rtnl_is_held());
4856 
4857         upper = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4858 
4859         if (&upper->list == &dev->all_adj_list.upper)
4860                 return NULL;
4861 
4862         *iter = &upper->list;
4863 
4864         return upper->dev;
4865 }
4866 EXPORT_SYMBOL(netdev_all_upper_get_next_dev_rcu);
4867 
4868 /**
4869  * netdev_lower_get_next_private - Get the next ->private from the
4870  *                                 lower neighbour list
4871  * @dev: device
4872  * @iter: list_head ** of the current position
4873  *
4874  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4875  * list, starting from iter position. The caller must either hold the
4876  * RTNL lock or its own locking that guarantees that the neighbour lower
4877  * list will remain unchanged.
4878  */
4879 void *netdev_lower_get_next_private(struct net_device *dev,
4880                                     struct list_head **iter)
4881 {
4882         struct netdev_adjacent *lower;
4883 
4884         lower = list_entry(*iter, struct netdev_adjacent, list);
4885 
4886         if (&lower->list == &dev->adj_list.lower)
4887                 return NULL;
4888 
4889         *iter = lower->list.next;
4890 
4891         return lower->private;
4892 }
4893 EXPORT_SYMBOL(netdev_lower_get_next_private);
4894 
4895 /**
4896  * netdev_lower_get_next_private_rcu - Get the next ->private from the
4897  *                                     lower neighbour list, RCU
4898  *                                     variant
4899  * @dev: device
4900  * @iter: list_head ** of the current position
4901  *
4902  * Gets the next netdev_adjacent->private from the dev's lower neighbour
4903  * list, starting from iter position. The caller must hold RCU read lock.
4904  */
4905 void *netdev_lower_get_next_private_rcu(struct net_device *dev,
4906                                         struct list_head **iter)
4907 {
4908         struct netdev_adjacent *lower;
4909 
4910         WARN_ON_ONCE(!rcu_read_lock_held());
4911 
4912         lower = list_entry_rcu((*iter)->next, struct netdev_adjacent, list);
4913 
4914         if (&lower->list == &dev->adj_list.lower)
4915                 return NULL;
4916 
4917         *iter = &lower->list;
4918 
4919         return lower->private;
4920 }
4921 EXPORT_SYMBOL(netdev_lower_get_next_private_rcu);
4922 
4923 /**
4924  * netdev_lower_get_next - Get the next device from the lower neighbour
4925  *                         list
4926  * @dev: device
4927  * @iter: list_head ** of the current position
4928  *
4929  * Gets the next netdev_adjacent from the dev's lower neighbour
4930  * list, starting from iter position. The caller must hold the RTNL lock
4931  * or its own locking that guarantees that the neighbour lower
4932  * list will remain unchanged.
4933  */
4934 void *netdev_lower_get_next(struct net_device *dev, struct list_head **iter)
4935 {
4936         struct netdev_adjacent *lower;
4937 
4938         lower = list_entry((*iter)->next, struct netdev_adjacent, list);
4939 
4940         if (&lower->list == &dev->adj_list.lower)
4941                 return NULL;
4942 
4943         *iter = &lower->list;
4944 
4945         return lower->dev;
4946 }
4947 EXPORT_SYMBOL(netdev_lower_get_next);
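
/* Editorial usage sketch (not part of this file): netdev_lower_get_next() is
 * normally used through the netdev_for_each_lower_dev() iterator, under RTNL
 * (dev_get_nest_level() below is an in-tree example):
 *
 *      struct net_device *lower;
 *      struct list_head *iter;
 *
 *      netdev_for_each_lower_dev(dev, lower, iter)
 *              netdev_dbg(dev, "lower device: %s\n", lower->name);
 */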
4948 
4949 /**
4950  * netdev_lower_get_first_private_rcu - Get the first ->private from the
4951  *                                     lower neighbour list, RCU
4952  *                                     variant
4953  * @dev: device
4954  *
4955  * Gets the first netdev_adjacent->private from the dev's lower neighbour
4956  * list. The caller must hold RCU read lock.
4957  */
4958 void *netdev_lower_get_first_private_rcu(struct net_device *dev)
4959 {
4960         struct netdev_adjacent *lower;
4961 
4962         lower = list_first_or_null_rcu(&dev->adj_list.lower,
4963                         struct netdev_adjacent, list);
4964         if (lower)
4965                 return lower->private;
4966         return NULL;
4967 }
4968 EXPORT_SYMBOL(netdev_lower_get_first_private_rcu);
4969 
4970 /**
4971  * netdev_master_upper_dev_get_rcu - Get master upper device
4972  * @dev: device
4973  *
4974  * Find a master upper device and return pointer to it or NULL in case
4975  * it's not there. The caller must hold the RCU read lock.
4976  */
4977 struct net_device *netdev_master_upper_dev_get_rcu(struct net_device *dev)
4978 {
4979         struct netdev_adjacent *upper;
4980 
4981         upper = list_first_or_null_rcu(&dev->adj_list.upper,
4982                                        struct netdev_adjacent, list);
4983         if (upper && likely(upper->master))
4984                 return upper->dev;
4985         return NULL;
4986 }
4987 EXPORT_SYMBOL(netdev_master_upper_dev_get_rcu);
4988 
4989 static int netdev_adjacent_sysfs_add(struct net_device *dev,
4990                               struct net_device *adj_dev,
4991                               struct list_head *dev_list)
4992 {
4993         char linkname[IFNAMSIZ+7];
4994         sprintf(linkname, dev_list == &dev->adj_list.upper ?
4995                 "upper_%s" : "lower_%s", adj_dev->name);
4996         return sysfs_create_link(&(dev->dev.kobj), &(adj_dev->dev.kobj),
4997                                  linkname);
4998 }
4999 static void netdev_adjacent_sysfs_del(struct net_device *dev,
5000                                char *name,
5001                                struct list_head *dev_list)
5002 {
5003         char linkname[IFNAMSIZ+7];
5004         sprintf(linkname, dev_list == &dev->adj_list.upper ?
5005                 "upper_%s" : "lower_%s", name);
5006         sysfs_remove_link(&(dev->dev.kobj), linkname);
5007 }
5008 
5009 static inline bool netdev_adjacent_is_neigh_list(struct net_device *dev,
5010                                                  struct net_device *adj_dev,
5011                                                  struct list_head *dev_list)
5012 {
5013         return (dev_list == &dev->adj_list.upper ||
5014                 dev_list == &dev->adj_list.lower) &&
5015                 net_eq(dev_net(dev), dev_net(adj_dev));
5016 }
5017 
5018 static int __netdev_adjacent_dev_insert(struct net_device *dev,
5019                                         struct net_device *adj_dev,
5020                                         struct list_head *dev_list,
5021                                         void *private, bool master)
5022 {
5023         struct netdev_adjacent *adj;
5024         int ret;
5025 
5026         adj = __netdev_find_adj(dev, adj_dev, dev_list);
5027 
5028         if (adj) {
5029                 adj->ref_nr++;
5030                 return 0;
5031         }
5032 
5033         adj = kmalloc(sizeof(*adj), GFP_KERNEL);
5034         if (!adj)
5035                 return -ENOMEM;
5036 
5037         adj->dev = adj_dev;
5038         adj->master = master;
5039         adj->ref_nr = 1;
5040         adj->private = private;
5041         dev_hold(adj_dev);
5042 
5043         pr_debug("dev_hold for %s, because of link added from %s to %s\n",
5044                  adj_dev->name, dev->name, adj_dev->name);
5045 
5046         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list)) {
5047                 ret = netdev_adjacent_sysfs_add(dev, adj_dev, dev_list);
5048                 if (ret)
5049                         goto free_adj;
5050         }
5051 
5052         /* Ensure that master link is always the first item in list. */
5053         if (master) {
5054                 ret = sysfs_create_link(&(dev->dev.kobj),
5055                                         &(adj_dev->dev.kobj), "master");
5056                 if (ret)
5057                         goto remove_symlinks;
5058 
5059                 list_add_rcu(&adj->list, dev_list);
5060         } else {
5061                 list_add_tail_rcu(&adj->list, dev_list);
5062         }
5063 
5064         return 0;
5065 
5066 remove_symlinks:
5067         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5068                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5069 free_adj:
5070         kfree(adj);
5071         dev_put(adj_dev);
5072 
5073         return ret;
5074 }
5075 
5076 static void __netdev_adjacent_dev_remove(struct net_device *dev,
5077                                          struct net_device *adj_dev,
5078                                          struct list_head *dev_list)
5079 {
5080         struct netdev_adjacent *adj;
5081 
5082         adj = __netdev_find_adj(dev, adj_dev, dev_list);
5083 
5084         if (!adj) {
5085                 pr_err("tried to remove device %s from %s\n",
5086                        dev->name, adj_dev->name);
5087                 BUG();
5088         }
5089 
5090         if (adj->ref_nr > 1) {
5091                 pr_debug("%s to %s ref_nr-- = %d\n", dev->name, adj_dev->name,
5092                          adj->ref_nr-1);
5093                 adj->ref_nr--;
5094                 return;
5095         }
5096 
5097         if (adj->master)
5098                 sysfs_remove_link(&(dev->dev.kobj), "master");
5099 
5100         if (netdev_adjacent_is_neigh_list(dev, adj_dev, dev_list))
5101                 netdev_adjacent_sysfs_del(dev, adj_dev->name, dev_list);
5102 
5103         list_del_rcu(&adj->list);
5104         pr_debug("dev_put for %s, because link removed from %s to %s\n",
5105                  adj_dev->name, dev->name, adj_dev->name);
5106         dev_put(adj_dev);
5107         kfree_rcu(adj, rcu);
5108 }
5109 
5110 static int __netdev_adjacent_dev_link_lists(struct net_device *dev,
5111                                             struct net_device *upper_dev,
5112                                             struct list_head *up_list,
5113                                             struct list_head *down_list,
5114                                             void *private, bool master)
5115 {
5116         int ret;
5117 
5118         ret = __netdev_adjacent_dev_insert(dev, upper_dev, up_list, private,
5119                                            master);
5120         if (ret)
5121                 return ret;
5122 
5123         ret = __netdev_adjacent_dev_insert(upper_dev, dev, down_list, private,
5124                                            false);
5125         if (ret) {
5126                 __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5127                 return ret;
5128         }
5129 
5130         return 0;
5131 }
5132 
5133 static int __netdev_adjacent_dev_link(struct net_device *dev,
5134                                       struct net_device *upper_dev)
5135 {
5136         return __netdev_adjacent_dev_link_lists(dev, upper_dev,
5137                                                 &dev->all_adj_list.upper,
5138                                                 &upper_dev->all_adj_list.lower,
5139                                                 NULL, false);
5140 }
5141 
5142 static void __netdev_adjacent_dev_unlink_lists(struct net_device *dev,
5143                                                struct net_device *upper_dev,
5144                                                struct list_head *up_list,
5145                                                struct list_head *down_list)
5146 {
5147         __netdev_adjacent_dev_remove(dev, upper_dev, up_list);
5148         __netdev_adjacent_dev_remove(upper_dev, dev, down_list);
5149 }
5150 
5151 static void __netdev_adjacent_dev_unlink(struct net_device *dev,
5152                                          struct net_device *upper_dev)
5153 {
5154         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5155                                            &dev->all_adj_list.upper,
5156                                            &upper_dev->all_adj_list.lower);
5157 }
5158 
5159 static int __netdev_adjacent_dev_link_neighbour(struct net_device *dev,
5160                                                 struct net_device *upper_dev,
5161                                                 void *private, bool master)
5162 {
5163         int ret = __netdev_adjacent_dev_link(dev, upper_dev);
5164 
5165         if (ret)
5166                 return ret;
5167 
5168         ret = __netdev_adjacent_dev_link_lists(dev, upper_dev,
5169                                                &dev->adj_list.upper,
5170                                                &upper_dev->adj_list.lower,
5171                                                private, master);
5172         if (ret) {
5173                 __netdev_adjacent_dev_unlink(dev, upper_dev);
5174                 return ret;
5175         }
5176 
5177         return 0;
5178 }
5179 
5180 static void __netdev_adjacent_dev_unlink_neighbour(struct net_device *dev,
5181                                                    struct net_device *upper_dev)
5182 {
5183         __netdev_adjacent_dev_unlink(dev, upper_dev);
5184         __netdev_adjacent_dev_unlink_lists(dev, upper_dev,
5185                                            &dev->adj_list.upper,
5186                                            &upper_dev->adj_list.lower);
5187 }
5188 
5189 static int __netdev_upper_dev_link(struct net_device *dev,
5190                                    struct net_device *upper_dev, bool master,
5191                                    void *private)
5192 {
5193         struct netdev_adjacent *i, *j, *to_i, *to_j;
5194         int ret = 0;
5195 
5196         ASSERT_RTNL();
5197 
5198         if (dev == upper_dev)
5199                 return -EBUSY;
5200 
5201         /* To prevent loops, make sure dev is not already an upper device of upper_dev. */
5202         if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
5203                 return -EBUSY;
5204 
5205         if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
5206                 return -EEXIST;
5207 
5208         if (master && netdev_master_upper_dev_get(dev))
5209                 return -EBUSY;
5210 
5211         ret = __netdev_adjacent_dev_link_neighbour(dev, upper_dev, private,
5212                                                    master);
5213         if (ret)
5214                 return ret;
5215 
5216         /* Now that we linked these devs, make all of upper_dev's
5217          * all_adj_list.upper visible to every dev's all_adj_list.lower and
5218          * vice versa, and don't forget the devices themselves. All of these
5219          * links are non-neighbours.
5220          */
5221         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5222                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5223                         pr_debug("Interlinking %s with %s, non-neighbour\n",
5224                                  i->dev->name, j->dev->name);
5225                         ret = __netdev_adjacent_dev_link(i->dev, j->dev);
5226                         if (ret)
5227                                 goto rollback_mesh;
5228                 }
5229         }
5230 
5231         /* link dev to every one of upper_dev's upper devices */
5232         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5233                 pr_debug("linking %s's upper device %s with %s\n",
5234                          upper_dev->name, i->dev->name, dev->name);
5235                 ret = __netdev_adjacent_dev_link(dev, i->dev);
5236                 if (ret)
5237                         goto rollback_upper_mesh;
5238         }
5239 
5240         /* link upper_dev to every one of dev's lower devices */
5241         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5242                 pr_debug("linking %s's lower device %s with %s\n", dev->name,
5243                          i->dev->name, upper_dev->name);
5244                 ret = __netdev_adjacent_dev_link(i->dev, upper_dev);
5245                 if (ret)
5246                         goto rollback_lower_mesh;
5247         }
5248 
5249         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5250         return 0;
5251 
5252 rollback_lower_mesh:
5253         to_i = i;
5254         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5255                 if (i == to_i)
5256                         break;
5257                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5258         }
5259 
5260         i = NULL;
5261 
5262 rollback_upper_mesh:
5263         to_i = i;
5264         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list) {
5265                 if (i == to_i)
5266                         break;
5267                 __netdev_adjacent_dev_unlink(dev, i->dev);
5268         }
5269 
5270         i = j = NULL;
5271 
5272 rollback_mesh:
5273         to_i = i;
5274         to_j = j;
5275         list_for_each_entry(i, &dev->all_adj_list.lower, list) {
5276                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list) {
5277                         if (i == to_i && j == to_j)
5278                                 break;
5279                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5280                 }
5281                 if (i == to_i)
5282                         break;
5283         }
5284 
5285         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5286 
5287         return ret;
5288 }
5289 
5290 /**
5291  * netdev_upper_dev_link - Add a link to the upper device
5292  * @dev: device
5293  * @upper_dev: new upper device
5294  *
5295  * Adds a link to device which is upper to this one. The caller must hold
5296  * the RTNL lock. On a failure a negative errno code is returned.
5297  * On success the reference counts are adjusted and the function
5298  * returns zero.
5299  */
5300 int netdev_upper_dev_link(struct net_device *dev,
5301                           struct net_device *upper_dev)
5302 {
5303         return __netdev_upper_dev_link(dev, upper_dev, false, NULL);
5304 }
5305 EXPORT_SYMBOL(netdev_upper_dev_link);
5306 
5307 /**
5308  * netdev_master_upper_dev_link - Add a master link to the upper device
5309  * @dev: device
5310  * @upper_dev: new upper device
5311  *
5312  * Adds a link to device which is upper to this one. In this case, only
5313  * one master upper device can be linked, although other non-master devices
5314  * might be linked as well. The caller must hold the RTNL lock.
5315  * On a failure a negative errno code is returned. On success the reference
5316  * counts are adjusted and the function returns zero.
5317  */
5318 int netdev_master_upper_dev_link(struct net_device *dev,
5319                                  struct net_device *upper_dev)
5320 {
5321         return __netdev_upper_dev_link(dev, upper_dev, true, NULL);
5322 }
5323 EXPORT_SYMBOL(netdev_master_upper_dev_link);
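
/* Editorial usage sketch (not part of this file): a bonding/team-style
 * driver links a new port below its master under RTNL and unlinks it again
 * on release; slave_dev and master_dev are assumed names.
 *
 *      err = netdev_master_upper_dev_link(slave_dev, master_dev);
 *      if (err)
 *              return err;
 *      ...
 *      netdev_upper_dev_unlink(slave_dev, master_dev);
 */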
5324 
5325 int netdev_master_upper_dev_link_private(struct net_device *dev,
5326                                          struct net_device *upper_dev,
5327                                          void *private)
5328 {
5329         return __netdev_upper_dev_link(dev, upper_dev, true, private);
5330 }
5331 EXPORT_SYMBOL(netdev_master_upper_dev_link_private);
5332 
5333 /**
5334  * netdev_upper_dev_unlink - Removes a link to upper device
5335  * @dev: device
5336  * @upper_dev: upper device to be removed
5337  *
5338  * Removes a link to device which is upper to this one. The caller must hold
5339  * the RTNL lock.
5340  */
5341 void netdev_upper_dev_unlink(struct net_device *dev,
5342                              struct net_device *upper_dev)
5343 {
5344         struct netdev_adjacent *i, *j;
5345         ASSERT_RTNL();
5346 
5347         __netdev_adjacent_dev_unlink_neighbour(dev, upper_dev);
5348 
5349         /* Here is the tricky part. We must remove all dev's lower
5350          * devices from all upper_dev's upper devices and vice
5351          * versa, to maintain the graph relationship.
5352          */
5353         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5354                 list_for_each_entry(j, &upper_dev->all_adj_list.upper, list)
5355                         __netdev_adjacent_dev_unlink(i->dev, j->dev);
5356 
5357         /* also remove the devices themselves from the lower/upper
5358          * device lists
5359          */
5360         list_for_each_entry(i, &dev->all_adj_list.lower, list)
5361                 __netdev_adjacent_dev_unlink(i->dev, upper_dev);
5362 
5363         list_for_each_entry(i, &upper_dev->all_adj_list.upper, list)
5364                 __netdev_adjacent_dev_unlink(dev, i->dev);
5365 
5366         call_netdevice_notifiers(NETDEV_CHANGEUPPER, dev);
5367 }
5368 EXPORT_SYMBOL(netdev_upper_dev_unlink);
5369 
5370 /**
5371  * netdev_bonding_info_change - Dispatch event about slave change
5372  * @dev: device
5373  * @bonding_info: info to dispatch
5374  *
5375  * Send NETDEV_BONDING_INFO to netdev notifiers with info.
5376  * The caller must hold the RTNL lock.
5377  */
5378 void netdev_bonding_info_change(struct net_device *dev,
5379                                 struct netdev_bonding_info *bonding_info)
5380 {
5381         struct netdev_notifier_bonding_info     info;
5382 
5383         memcpy(&info.bonding_info, bonding_info,
5384                sizeof(struct netdev_bonding_info));
5385         call_netdevice_notifiers_info(NETDEV_BONDING_INFO, dev,
5386                                       &info.info);
5387 }
5388 EXPORT_SYMBOL(netdev_bonding_info_change);
5389 
5390 static void netdev_adjacent_add_links(struct net_device *dev)
5391 {
5392         struct netdev_adjacent *iter;
5393 
5394         struct net *net = dev_net(dev);
5395 
5396         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5397                 if (!net_eq(net, dev_net(iter->dev)))
5398                         continue;
5399                 netdev_adjacent_sysfs_add(iter->dev, dev,
5400                                           &iter->dev->adj_list.lower);
5401                 netdev_adjacent_sysfs_add(dev, iter->dev,
5402                                           &dev->adj_list.upper);
5403         }
5404 
5405         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5406                 if (!net_eq(net, dev_net(iter->dev)))
5407                         continue;
5408                 netdev_adjacent_sysfs_add(iter->dev, dev,
5409                                           &iter->dev->adj_list.upper);
5410                 netdev_adjacent_sysfs_add(dev, iter->dev,
5411                                           &dev->adj_list.lower);
5412         }
5413 }
5414 
5415 static void netdev_adjacent_del_links(struct net_device *dev)
5416 {
5417         struct netdev_adjacent *iter;
5418 
5419         struct net *net = dev_net(dev);
5420 
5421         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5422                 if (!net_eq(net, dev_net(iter->dev)))
5423                         continue;
5424                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5425                                           &iter->dev->adj_list.lower);
5426                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5427                                           &dev->adj_list.upper);
5428         }
5429 
5430         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5431                 if (!net_eq(net, dev_net(iter->dev)))
5432                         continue;
5433                 netdev_adjacent_sysfs_del(iter->dev, dev->name,
5434                                           &iter->dev->adj_list.upper);
5435                 netdev_adjacent_sysfs_del(dev, iter->dev->name,
5436                                           &dev->adj_list.lower);
5437         }
5438 }
5439 
5440 void netdev_adjacent_rename_links(struct net_device *dev, char *oldname)
5441 {
5442         struct netdev_adjacent *iter;
5443 
5444         struct net *net = dev_net(dev);
5445 
5446         list_for_each_entry(iter, &dev->adj_list.upper, list) {
5447                 if (!net_eq(net, dev_net(iter->dev)))
5448                         continue;
5449                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5450                                           &iter->dev->adj_list.lower);
5451                 netdev_adjacent_sysfs_add(iter->dev, dev,
5452                                           &iter->dev->adj_list.lower);
5453         }
5454 
5455         list_for_each_entry(iter, &dev->adj_list.lower, list) {
5456                 if (!net_eq(net, dev_net(iter->dev)))
5457                         continue;
5458                 netdev_adjacent_sysfs_del(iter->dev, oldname,
5459                                           &iter->dev->adj_list.upper);
5460                 netdev_adjacent_sysfs_add(iter->dev, dev,
5461                                           &iter->dev->adj_list.upper);
5462         }
5463 }
5464 
5465 void *netdev_lower_dev_get_private(struct net_device *dev,
5466                                    struct net_device *lower_dev)
5467 {
5468         struct netdev_adjacent *lower;
5469 
5470         if (!lower_dev)
5471                 return NULL;
5472         lower = __netdev_find_adj(dev, lower_dev, &dev->adj_list.lower);
5473         if (!lower)
5474                 return NULL;
5475 
5476         return lower->private;
5477 }
5478 EXPORT_SYMBOL(netdev_lower_dev_get_private);
5479 
5480 
5481 int dev_get_nest_level(struct net_device *dev,
5482                        bool (*type_check)(struct net_device *dev))
5483 {
5484         struct net_device *lower = NULL;
5485         struct list_head *iter;
5486         int max_nest = -1;
5487         int nest;
5488 
5489         ASSERT_RTNL();
5490 
5491         netdev_for_each_lower_dev(dev, lower, iter) {
5492                 nest = dev_get_nest_level(lower, type_check);
5493                 if (max_nest < nest)
5494                         max_nest = nest;
5495         }
5496 
5497         if (type_check(dev))
5498                 max_nest++;
5499 
5500         return max_nest;
5501 }
5502 EXPORT_SYMBOL(dev_get_nest_level);
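
/* Editorial usage sketch (not part of this file): a stacking driver passes a
 * predicate for its own device type so that only same-type layers add to the
 * nesting depth; is_foo_dev() is an assumed helper.
 *
 *      int nest = dev_get_nest_level(dev, is_foo_dev);
 */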
5503 
5504 static void dev_change_rx_flags(struct net_device *dev, int flags)
5505 {
5506         const struct net_device_ops *ops = dev->netdev_ops;
5507 
5508         if (ops->ndo_change_rx_flags)
5509                 ops->ndo_change_rx_flags(dev, flags);
5510 }
5511 
5512 static int __dev_set_promiscuity(struct net_device *dev, int inc, bool notify)
5513 {
5514         unsigned int old_flags = dev->flags;
5515         kuid_t uid;
5516         kgid_t gid;
5517 
5518         ASSERT_RTNL();
5519 
5520         dev->flags |= IFF_PROMISC;
5521         dev->promiscuity += inc;
5522         if (dev->promiscuity == 0) {
5523                 /*
5524                  * Avoid overflow.
5525                  * If inc causes overflow, leave promiscuity untouched and return an error.
5526                  */
5527                 if (inc < 0)
5528                         dev->flags &= ~IFF_PROMISC;
5529                 else {
5530                         dev->promiscuity -= inc;
5531                         pr_warn("%s: promiscuity touches roof, set promiscuity failed. promiscuity feature of device might be broken.\n",
5532                                 dev->name);
5533                         return -EOVERFLOW;
5534                 }
5535         }
5536         if (dev->flags != old_flags) {
5537                 pr_info("device %s %s promiscuous mode\n",
5538                         dev->name,
5539                         dev->flags & IFF_PROMISC ? "entered" : "left");
5540                 if (audit_enabled) {
5541                         current_uid_gid(&uid, &gid);
5542                         audit_log(current->audit_context, GFP_ATOMIC,
5543                                 AUDIT_ANOM_PROMISCUOUS,
5544                                 "dev=%s prom=%d old_prom=%d auid=%u uid=%u gid=%u ses=%u",
5545                                 dev->name, (dev->flags & IFF_PROMISC),
5546                                 (old_flags & IFF_PROMISC),
5547                                 from_kuid(&init_user_ns, audit_get_loginuid(current)),
5548                                 from_kuid(&init_user_ns, uid),
5549                                 from_kgid(&init_user_ns, gid),
5550                                 audit_get_sessionid(current));
5551                 }
5552 
5553                 dev_change_rx_flags(dev, IFF_PROMISC);
5554         }
5555         if (notify)
5556                 __dev_notify_flags(dev, old_flags, IFF_PROMISC);
5557         return 0;
5558 }
5559 
5560 /**
5561  *      dev_set_promiscuity     - update promiscuity count on a device
5562  *      @dev: device
5563  *      @inc: modifier
5564  *
5565  *      Add or remove promiscuity from a device. While the count in the device
5566  *      remains above zero the interface remains promiscuous. Once it hits zero
5567  *      the device reverts back to normal filtering operation. A negative inc
5568  *      the device reverts to normal filtering operation. A negative inc
5569  *      Return 0 if successful or a negative errno code on error.
5570  */
5571 int dev_set_promiscuity(struct net_device *dev, int inc)
5572 {
5573         unsigned int old_flags = dev->flags;
5574         int err;
5575 
5576         err = __dev_set_promiscuity(dev, inc, true);
5577         if (err < 0)
5578                 return err;
5579         if (dev->flags != old_flags)
5580                 dev_set_rx_mode(dev);
5581         return err;
5582 }
5583 EXPORT_SYMBOL(dev_set_promiscuity);
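
/* Editorial usage sketch (not part of this file): an upper device that needs
 * to see all traffic on a port bumps the count while attached and drops it
 * with a negative increment on detach, under RTNL; port_dev is an assumed
 * name.
 *
 *      err = dev_set_promiscuity(port_dev, 1);
 *      if (err)
 *              return err;
 *      ...
 *      dev_set_promiscuity(port_dev, -1);
 */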
5584 
5585 static int __dev_set_allmulti(struct net_device *dev, int inc, bool notify)
5586 {
5587         unsigned int old_flags = dev->flags, old_gflags = dev->gflags;
5588 
5589         ASSERT_RTNL();
5590 
5591         dev->flags |= IFF_ALLMULTI;
5592         dev->allmulti += inc;
5593         if (dev->allmulti == 0) {
5594                 /*
5595                  * Avoid overflow.
5596                  * If inc causes overflow, leave allmulti untouched and return an error.
5597                  */
5598                 if (inc < 0)
5599                         dev->flags &= ~IFF_ALLMULTI;
5600                 else {
5601                         dev->allmulti -= inc;
5602                         pr_warn("%s: allmulti touches roof, set allmulti failed. allmulti feature of device might be broken.\n",
5603                                 dev->name);
5604                         return -EOVERFLOW;
5605                 }
5606         }
5607         if (dev->flags ^ old_flags) {
5608                 dev_change_rx_flags(dev, IFF_ALLMULTI);
5609                 dev_set_rx_mode(dev);
5610                 if (notify)
5611                         __dev_notify_flags(dev, old_flags,
5612                                            dev->gflags ^ old_gflags);
5613         }
5614         return 0;
5615 }
5616 
5617 /**
5618  *      dev_set_allmulti        - update allmulti count on a device
5619  *      @dev: device
5620  *      @inc: modifier
5621  *
5622  *      Add or remove reception of all multicast frames to a device. While the
5623  *      count in the device remains above zero the interface keeps listening
5624  *      to all multicast frames. Once it hits zero the device reverts to normal
5625  *      filtering operation. A negative @inc value is used to drop the counter
5626  *      when releasing a resource needing all multicasts.
5627  *      Return 0 if successful or a negative errno code on error.
5628  */
5629 
5630 int dev_set_allmulti(struct net_device *dev, int inc)
5631 {
5632         return __dev_set_allmulti(dev, inc, true);
5633 }
5634 EXPORT_SYMBOL(dev_set_allmulti);
5635 
5636 /*
5637  *      Upload unicast and multicast address lists to device and
5638  *      configure RX filtering. When the device doesn't support unicast
5639  *      filtering it is put in promiscuous mode while unicast addresses
5640  *      are present.
5641  */
5642 void __dev_set_rx_mode(struct net_device *dev)
5643 {
5644         const struct net_device_ops *ops = dev->netdev_ops;
5645 
5646         /* dev_open will call this function so the list will stay sane. */
5647         if (!(dev->flags&IFF_UP))
5648                 return;
5649 
5650         if (!netif_device_present(dev))
5651                 return;
5652 
5653         if (!(dev->priv_flags & IFF_UNICAST_FLT)) {
5654                 /* Unicast address changes may only happen under the rtnl,
5655                  * therefore calling __dev_set_promiscuity here is safe.
5656                  */
5657                 if (!netdev_uc_empty(dev) && !dev->uc_promisc) {
5658                         __dev_set_promiscuity(dev, 1, false);
5659                         dev->uc_promisc = true;
5660                 } else if (netdev_uc_empty(dev) && dev->uc_promisc) {
5661                         __dev_set_promiscuity(dev, -1, false);
5662                         dev->uc_promisc = false;
5663                 }
5664         }
5665 
5666         if (ops->ndo_set_rx_mode)
5667                 ops->ndo_set_rx_mode(dev);
5668 }
5669 
5670 void dev_set_rx_mode(struct net_device *dev)
5671 {
5672         netif_addr_lock_bh(dev);
5673         __dev_set_rx_mode(dev);
5674         netif_addr_unlock_bh(dev);
5675 }
5676 
5677 /**
5678  *      dev_get_flags - get flags reported to userspace
5679  *      @dev: device
5680  *
5681  *      Get the combination of flag bits exported through APIs to userspace.
5682  */
5683 unsigned int dev_get_flags(const struct net_device *dev)
5684 {
5685         unsigned int flags;
5686 
5687         flags = (dev->flags & ~(IFF_PROMISC |
5688                                 IFF_ALLMULTI |
5689                                 IFF_RUNNING |
5690                                 IFF_LOWER_UP |
5691                                 IFF_DORMANT)) |
5692                 (dev->gflags & (IFF_PROMISC |
5693                                 IFF_ALLMULTI));
5694 
5695         if (netif_running(dev)) {
5696                 if (netif_oper_up(dev))
5697                         flags |= IFF_RUNNING;
5698                 if (netif_carrier_ok(dev))
5699                         flags |= IFF_LOWER_UP;
5700                 if (netif_dormant(dev))
5701                         flags |= IFF_DORMANT;
5702         }
5703 
5704         return flags;
5705 }
5706 EXPORT_SYMBOL(dev_get_flags);
5707 
5708 int __dev_change_flags(struct net_device *dev, unsigned int flags)
5709 {
5710         unsigned int old_flags = dev->flags;
5711         int ret;
5712 
5713         ASSERT_RTNL();
5714 
5715         /*
5716          *      Set the flags on our device.
5717          */
5718 
5719         dev->flags = (flags & (IFF_DEBUG | IFF_NOTRAILERS | IFF_NOARP |
5720                                IFF_DYNAMIC | IFF_MULTICAST | IFF_PORTSEL |
5721                                IFF_AUTOMEDIA)) |
5722                      (dev->flags & (IFF_UP | IFF_VOLATILE | IFF_PROMISC |
5723                                     IFF_ALLMULTI));
5724 
5725         /*
5726          *      Load in the correct multicast list now the flags have changed.
5727          */
5728 
5729         if ((old_flags ^ flags) & IFF_MULTICAST)
5730                 dev_change_rx_flags(dev, IFF_MULTICAST);
5731 
5732         dev_set_rx_mode(dev);
5733 
5734         /*
5735          *      Have we downed the interface? We handle IFF_UP ourselves
5736          *      according to user attempts to set it, rather than blindly
5737          *      setting it.
5738          */
5739 
5740         ret = 0;
5741         if ((old_flags ^ flags) & IFF_UP)
5742                 ret = ((old_flags & IFF_UP) ? __dev_close : __dev_open)(dev);
5743 
5744         if ((flags ^ dev->gflags) & IFF_PROMISC) {
5745                 int inc = (flags & IFF_PROMISC) ? 1 : -1;
5746                 unsigned int old_flags = dev->flags;
5747 
5748                 dev->gflags ^= IFF_PROMISC;
5749 
5750                 if (__dev_set_promiscuity(dev, inc, false) >= 0)
5751                         if (dev->flags != old_flags)
5752                                 dev_set_rx_mode(dev);
5753         }
5754 
5755         /* NOTE: order of synchronization of IFF_PROMISC and IFF_ALLMULTI
5756            is important. Some (broken) drivers set IFF_PROMISC when
5757            IFF_ALLMULTI is requested, without asking us and without reporting it.
5758          */
5759         if ((flags ^ dev->gflags) & IFF_ALLMULTI) {
5760                 int inc = (flags & IFF_ALLMULTI) ? 1 : -1;
5761 
5762                 dev->gflags ^= IFF_ALLMULTI;
5763                 __dev_set_allmulti(dev, inc, false);
5764         }
5765 
5766         return ret;
5767 }
5768 
5769 void __dev_notify_flags(struct net_device *dev, unsigned int old_flags,
5770                         unsigned int gchanges)
5771 {
5772         unsigned int changes = dev->flags ^ old_flags;
5773 
5774         if (gchanges)
5775                 rtmsg_ifinfo(RTM_NEWLINK, dev, gchanges, GFP_ATOMIC);
5776 
5777         if (changes & IFF_UP) {
5778                 if (dev->flags & IFF_UP)
5779                         call_netdevice_notifiers(NETDEV_UP, dev);
5780                 else
5781                         call_netdevice_notifiers(NETDEV_DOWN, dev);
5782         }
5783 
5784         if (dev->flags & IFF_UP &&
5785             (changes & ~(IFF_UP | IFF_PROMISC | IFF_ALLMULTI | IFF_VOLATILE))) {
5786                 struct netdev_notifier_change_info change_info;
5787 
5788                 change_info.flags_changed = changes;
5789                 call_netdevice_notifiers_info(NETDEV_CHANGE, dev,
5790                                               &change_info.info);
5791         }
5792 }
5793 
5794 /**
5795  *      dev_change_flags - change device settings
5796  *      @dev: device
5797  *      @flags: device state flags
5798  *
5799  *      Change settings on the device based on the state flags. The flags are
5800  *      in the userspace exported format.
5801  */
5802 int dev_change_flags(struct net_device *dev, unsigned int flags)
5803 {
5804         int ret;
5805         unsigned int changes, old_flags = dev->flags, old_gflags = dev->gflags;
5806 
5807         ret = __dev_change_flags(dev, flags);
5808         if (ret < 0)
5809                 return ret;
5810 
5811         changes = (old_flags ^ dev->flags) | (old_gflags ^ dev->gflags);
5812         __dev_notify_flags(dev, old_flags, changes);
5813         return ret;
5814 }
5815 EXPORT_SYMBOL(dev_change_flags);
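
/* Editorial usage sketch (not part of this file): the ioctl and rtnetlink
 * paths use this to apply userspace flag changes, e.g. bringing an interface
 * up under RTNL:
 *
 *      unsigned int flags = dev_get_flags(dev) | IFF_UP;
 *      int err = dev_change_flags(dev, flags);
 */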
5816 
5817 static int __dev_set_mtu(struct net_device *dev, int new_mtu)
5818 {
5819         const struct net_device_ops *ops = dev->netdev_ops;
5820 
5821         if (ops->ndo_change_mtu)
5822                 return ops->ndo_change_mtu(dev, new_mtu);
5823 
5824         dev->mtu = new_mtu;
5825         return 0;
5826 }
5827 
5828 /**
5829  *      dev_set_mtu - Change maximum transfer unit
5830  *      @dev: device
5831  *      @new_mtu: new transfer unit
5832  *
5833  *      Change the maximum transfer size of the network device.
5834  */
5835 int dev_set_mtu(struct net_device *dev, int new_mtu)
5836 {
5837         int err, orig_mtu;
5838 
5839         if (new_mtu == dev->mtu)
5840                 return 0;
5841 
5842         /*      MTU must not be negative.        */
5843         if (new_mtu < 0)
5844                 return -EINVAL;
5845 
5846         if (!netif_device_present(dev))
5847                 return -ENODEV;
5848 
5849         err = call_netdevice_notifiers(NETDEV_PRECHANGEMTU, dev);
5850         err = notifier_to_errno(err);
5851         if (err)
5852                 return err;
5853 
5854         orig_mtu = dev->mtu;
5855         err = __dev_set_mtu(dev, new_mtu);
5856 
5857         if (!err) {
5858                 err = call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5859                 err = notifier_to_errno(err);
5860                 if (err) {
5861                         /* setting mtu back and notifying everyone again,
5862                          * so that they have a chance to revert changes.
5863                          */
5864                         __dev_set_mtu(dev, orig_mtu);
5865                         call_netdevice_notifiers(NETDEV_CHANGEMTU, dev);
5866                 }
5867         }
5868         return err;
5869 }
5870 EXPORT_SYMBOL(dev_set_mtu);
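
/* Editorial usage sketch (not part of this file): an upper device
 * propagating its MTU to a lower device under RTNL; lower_dev and new_mtu
 * are assumed names.
 *
 *      err = dev_set_mtu(lower_dev, new_mtu);
 *      if (err)
 *              netdev_err(lower_dev, "failed to set MTU to %d\n", new_mtu);
 */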
5871 
5872 /**
5873  *      dev_set_group - Change group this device belongs to
5874  *      @dev: device
5875  *      @new_group: group this device should belong to
5876  */
5877 void dev_set_group(struct net_device *dev, int new_group)
5878 {
5879         dev->group = new_group;
5880 }
5881 EXPORT_SYMBOL(dev_set_group);
5882 
5883 /**
5884  *      dev_set_mac_address - Change Media Access Control Address
5885  *      @dev: device
5886  *      @sa: new address
5887  *
5888  *      Change the hardware (MAC) address of the device
5889  */
5890 int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
5891 {
5892         const struct net_device_ops *ops = dev->netdev_ops;
5893         int err;
5894 
5895         if (!ops->ndo_set_mac_address)
5896                 return -EOPNOTSUPP;
5897         if (sa->sa_family != dev->type)
5898                 return -EINVAL;
5899         if (!netif_device_present(dev))
5900                 return -ENODEV;
5901         err = ops->ndo_set_mac_address(dev, sa);
5902         if (err)
5903                 return err;
5904         dev->addr_assign_type = NET_ADDR_SET;
5905         call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
5906         add_device_randomness(dev->dev_addr, dev->addr_len);
5907         return 0;
5908 }
5909 EXPORT_SYMBOL(dev_set_mac_address);
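
/* Editorial usage sketch (not part of this file): callers pass a struct
 * sockaddr whose sa_family matches dev->type; new_addr is an assumed buffer
 * of dev->addr_len bytes.
 *
 *      struct sockaddr sa;
 *
 *      sa.sa_family = dev->type;
 *      memcpy(sa.sa_data, new_addr, dev->addr_len);
 *      err = dev_set_mac_address(dev, &sa);
 */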
5910 
5911 /**
5912  *      dev_change_carrier - Change device carrier
5913  *      @dev: device
5914  *      @new_carrier: new value
5915  *
5916  *      Change device carrier
5917  */
5918 int dev_change_carrier(struct net_device *dev, bool new_carrier)
5919 {
5920         const struct net_device_ops *ops = dev->netdev_ops;
5921 
5922         if (!ops->ndo_change_carrier)
5923                 return -EOPNOTSUPP;
5924         if (!netif_device_present(dev))
5925                 return -ENODEV;
5926         return ops->ndo_change_carrier(dev, new_carrier);
5927 }
5928 EXPORT_SYMBOL(dev_change_carrier);
5929 
5930 /**
5931  *      dev_get_phys_port_id - Get device physical port ID
5932  *      @dev: device
5933  *      @ppid: port ID
5934  *
5935  *      Get device physical port ID
5936  */
5937 int dev_get_phys_port_id(struct net_device *dev,
5938                          struct netdev_phys_item_id *ppid)
5939 {
5940         const struct net_device_ops *ops = dev->netdev_ops;
5941 
5942         if (!ops->ndo_get_phys_port_id)
5943                 return -EOPNOTSUPP;
5944         return ops->ndo_get_phys_port_id(dev, ppid);
5945 }
5946 EXPORT_SYMBOL(dev_get_phys_port_id);
5947 
5948 /**
5949  *      dev_get_phys_port_name - Get device physical port name
5950  *      @dev: device
5951  *      @name: port name
5952  *
5953  *      Get device physical port name
5954  */
5955 int dev_get_phys_port_name(struct net_device *dev,
5956                            char *name, size_t len)
5957 {
5958         const struct net_device_ops *ops = dev->netdev_ops;
5959 
5960         if (!ops->ndo_get_phys_port_name)
5961                 return -EOPNOTSUPP;
5962         return ops->ndo_get_phys_port_name(dev, name, len);
5963 }
5964 EXPORT_SYMBOL(dev_get_phys_port_name);
5965 
5966 /**
5967  *      dev_new_index   -       allocate an ifindex
5968  *      @net: the applicable net namespace
5969  *
5970  *      Returns a suitable unique value for a new device interface
5971  *      number.  The caller must hold the rtnl semaphore or the
5972  *      dev_base_lock to be sure it remains unique.
5973  */
5974 static int dev_new_index(struct net *net)
5975 {
5976         int ifindex = net->ifindex;
5977         for (;;) {
5978                 if (++ifindex <= 0)
5979                         ifindex = 1;
5980                 if (!__dev_get_by_index(net, ifindex))
5981                         return net->ifindex = ifindex;
5982         }
5983 }
5984 
5985 /* Delayed registration/unregistration */
5986 static LIST_HEAD(net_todo_list);
5987 DECLARE_WAIT_QUEUE_HEAD(netdev_unregistering_wq);
5988 
5989 static void net_set_todo(struct net_device *dev)
5990 {
5991         list_add_tail(&dev->todo_list, &net_todo_list);
5992         dev_net(dev)->dev_unreg_count++;
5993 }
5994 
5995 static void rollback_registered_many(struct list_head *head)
5996 {
5997         struct net_device *dev, *tmp;
5998         LIST_HEAD(close_head);
5999 
6000         BUG_ON(dev_boot_phase);
6001         ASSERT_RTNL();
6002 
6003         list_for_each_entry_safe(dev, tmp, head, unreg_list) {
6004                 /* Some devices call this without ever having been
6005                  * registered, as part of unwinding their initialization.
6006                  * Remove those devices and proceed with the remaining ones.
6007                  */
6008                 if (dev->reg_state == NETREG_UNINITIALIZED) {
6009                         pr_debug("unregister_netdevice: device %s/%p never was registered\n",
6010                                  dev->name, dev);
6011 
6012                         WARN_ON(1);
6013                         list_del(&dev->unreg_list);
6014                         continue;
6015                 }
6016                 dev->dismantle = true;
6017                 BUG_ON(dev->reg_state != NETREG_REGISTERED);
6018         }
6019 
6020         /* If device is running, close it first. */
6021         list_for_each_entry(dev, head, unreg_list)
6022                 list_add_tail(&dev->close_list, &close_head);
6023         dev_close_many(&close_head, true);
6024 
6025         list_for_each_entry(dev, head, unreg_list) {
6026                 /* And unlink it from device chain. */
6027                 unlist_netdevice(dev);
6028 
6029                 dev->reg_state = NETREG_UNREGISTERING;
6030         }
6031 
6032         synchronize_net();
6033 
6034         list_for_each_entry(dev, head, unreg_list) {
6035                 struct sk_buff *skb = NULL;
6036 
6037                 /* Shutdown queueing discipline. */
6038                 dev_shutdown(dev);
6039 
6040 
6041                 /* Notify protocols that we are about to destroy
6042                    this device. They should clean up all of their state.
6043                 */
6044                 call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6045 
6046                 if (!dev->rtnl_link_ops ||
6047                     dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6048                         skb = rtmsg_ifinfo_build_skb(RTM_DELLINK, dev, ~0U,
6049                                                      GFP_KERNEL);
6050 
6051                 /*
6052                  *      Flush the unicast and multicast chains
6053                  */
6054                 dev_uc_flush(dev);
6055                 dev_mc_flush(dev);
6056 
6057                 if (dev->netdev_ops->ndo_uninit)
6058                         dev->netdev_ops->ndo_uninit(dev);
6059 
6060                 if (skb)
6061                         rtmsg_ifinfo_send(skb, dev, GFP_KERNEL);
6062 
6063                 /* The notifier chain MUST have detached all upper devices from us. */
6064                 WARN_ON(netdev_has_any_upper_dev(dev));
6065 
6066                 /* Remove entries from kobject tree */
6067                 netdev_unregister_kobject(dev);
6068 #ifdef CONFIG_XPS
6069                 /* Remove XPS queueing entries */
6070                 netif_reset_xps_queues_gt(dev, 0);
6071 #endif
6072         }
6073 
6074         synchronize_net();
6075 
6076         list_for_each_entry(dev, head, unreg_list)
6077                 dev_put(dev);
6078 }
6079 
6080 static void rollback_registered(struct net_device *dev)
6081 {
6082         LIST_HEAD(single);
6083 
6084         list_add(&dev->unreg_list, &single);
6085         rollback_registered_many(&single);
6086         list_del(&single);
6087 }
6088 
6089 static netdev_features_t netdev_fix_features(struct net_device *dev,
6090         netdev_features_t features)
6091 {
6092         /* Fix illegal checksum combinations */
6093         if ((features & NETIF_F_HW_CSUM) &&
6094             (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6095                 netdev_warn(dev, "mixed HW and IP checksum settings.\n");
6096                 features &= ~(NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM);
6097         }
6098 
6099         /* TSO requires that SG is present as well. */
6100         if ((features & NETIF_F_ALL_TSO) && !(features & NETIF_F_SG)) {
6101                 netdev_dbg(dev, "Dropping TSO features since no SG feature.\n");
6102                 features &= ~NETIF_F_ALL_TSO;
6103         }
6104 
6105         if ((features & NETIF_F_TSO) && !(features & NETIF_F_HW_CSUM) &&
6106                                         !(features & NETIF_F_IP_CSUM)) {
6107                 netdev_dbg(dev, "Dropping TSO features since no CSUM feature.\n");
6108                 features &= ~NETIF_F_TSO;
6109                 features &= ~NETIF_F_TSO_ECN;
6110         }
6111 
6112         if ((features & NETIF_F_TSO6) && !(features & NETIF_F_HW_CSUM) &&
6113                                          !(features & NETIF_F_IPV6_CSUM)) {
6114                 netdev_dbg(dev, "Dropping TSO6 features since no CSUM feature.\n");
6115                 features &= ~NETIF_F_TSO6;
6116         }
6117 
6118         /* TSO ECN requires that TSO is present as well. */
6119         if ((features & NETIF_F_ALL_TSO) == NETIF_F_TSO_ECN)
6120                 features &= ~NETIF_F_TSO_ECN;
6121 
6122         /* Software GSO depends on SG. */
6123         if ((features & NETIF_F_GSO) && !(features & NETIF_F_SG)) {
6124                 netdev_dbg(dev, "Dropping NETIF_F_GSO since no SG feature.\n");
6125                 features &= ~NETIF_F_GSO;
6126         }
6127 
6128         /* UFO needs SG and checksumming */
6129         if (features & NETIF_F_UFO) {
6130                 /* maybe split UFO into V4 and V6? */
6131                 if (!((features & NETIF_F_GEN_CSUM) ||
6132                     (features & (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))
6133                             == (NETIF_F_IP_CSUM|NETIF_F_IPV6_CSUM))) {
6134                         netdev_dbg(dev,
6135                                 "Dropping NETIF_F_UFO since no checksum offload features.\n");
6136                         features &= ~NETIF_F_UFO;
6137                 }
6138 
6139                 if (!(features & NETIF_F_SG)) {
6140                         netdev_dbg(dev,
6141                                 "Dropping NETIF_F_UFO since no NETIF_F_SG feature.\n");
6142                         features &= ~NETIF_F_UFO;
6143                 }
6144         }
6145 
6146 #ifdef CONFIG_NET_RX_BUSY_POLL
6147         if (dev->netdev_ops->ndo_busy_poll)
6148                 features |= NETIF_F_BUSY_POLL;
6149         else
6150 #endif
6151                 features &= ~NETIF_F_BUSY_POLL;
6152 
6153         return features;
6154 }
6155 
6156 int __netdev_update_features(struct net_device *dev)
6157 {
6158         netdev_features_t features;
6159         int err = 0;
6160 
6161         ASSERT_RTNL();
6162 
6163         features = netdev_get_wanted_features(dev);
6164 
6165         if (dev->netdev_ops->ndo_fix_features)
6166                 features = dev->netdev_ops->ndo_fix_features(dev, features);
6167 
6168         /* driver might be less strict about feature dependencies */
6169         features = netdev_fix_features(dev, features);
6170 
6171         if (dev->features == features)
6172                 return 0;
6173 
6174         netdev_dbg(dev, "Features changed: %pNF -> %pNF\n",
6175                 &dev->features, &features);
6176 
6177         if (dev->netdev_ops->ndo_set_features)
6178                 err = dev->netdev_ops->ndo_set_features(dev, features);
6179 
6180         if (unlikely(err < 0)) {
6181                 netdev_err(dev,
6182                         "set_features() failed (%d); wanted %pNF, left %pNF\n",
6183                         err, &features, &dev->features);
6184                 return -1;
6185         }
6186 
6187         if (!err)
6188                 dev->features = features;
6189 
6190         return 1;
6191 }
6192 
6193 /**
6194  *      netdev_update_features - recalculate device features
6195  *      @dev: the device to check
6196  *
6197  *      Recalculate dev->features set and send notifications if it
6198  *      has changed. Should be called after driver or hardware dependent
6199  *      conditions might have changed that influence the features.
6200  */
6201 void netdev_update_features(struct net_device *dev)
6202 {
6203         if (__netdev_update_features(dev))
6204                 netdev_features_change(dev);
6205 }
6206 EXPORT_SYMBOL(netdev_update_features);
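
/*
 * Illustrative sketch (not part of dev.c): the usual driver-side pattern is
 * to express hardware-dependent constraints in ndo_fix_features() and to
 * call netdev_update_features() whenever the constraining condition changes.
 * The helpers below and the 4000-byte limit are assumptions made up for this
 * example; the MTU callback runs under the RTNL lock.
 */
static netdev_features_t example_fix_features(struct net_device *dev,
                                              netdev_features_t features)
{
        /* Assume the hardware cannot checksum frames larger than 4000 bytes. */
        if (dev->mtu > 4000)
                features &= ~NETIF_F_IP_CSUM;
        return features;
}

static int example_change_mtu(struct net_device *dev, int new_mtu)
{
        dev->mtu = new_mtu;
        netdev_update_features(dev);    /* re-evaluated via ndo_fix_features() */
        return 0;
}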
6207 
6208 /**
6209  *      netdev_change_features - recalculate device features
6210  *      @dev: the device to check
6211  *
6212  *      Recalculate dev->features set and send notifications even
6213  *      if they have not changed. Should be called instead of
6214  *      netdev_update_features() if also dev->vlan_features might
6215  *      have changed to allow the changes to be propagated to stacked
6216  *      VLAN devices.
6217  */
6218 void netdev_change_features(struct net_device *dev)
6219 {
6220         __netdev_update_features(dev);
6221         netdev_features_change(dev);
6222 }
6223 EXPORT_SYMBOL(netdev_change_features);
6224 
6225 /**
6226  *      netif_stacked_transfer_operstate -      transfer operstate
6227  *      @rootdev: the root or lower level device to transfer state from
6228  *      @dev: the device to transfer operstate to
6229  *
6230  *      Transfer operational state from root to device. This is normally
6231  *      called when a stacking relationship exists between the root
6232  *      device and the device (a leaf device).
6233  */
6234 void netif_stacked_transfer_operstate(const struct net_device *rootdev,
6235                                         struct net_device *dev)
6236 {
6237         if (rootdev->operstate == IF_OPER_DORMANT)
6238                 netif_dormant_on(dev);
6239         else
6240                 netif_dormant_off(dev);
6241 
6242         if (netif_carrier_ok(rootdev)) {
6243                 if (!netif_carrier_ok(dev))
6244                         netif_carrier_on(dev);
6245         } else {
6246                 if (netif_carrier_ok(dev))
6247                         netif_carrier_off(dev);
6248         }
6249 }
6250 EXPORT_SYMBOL(netif_stacked_transfer_operstate);
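
/*
 * Illustrative sketch (not part of dev.c): a stacking driver (VLAN, bond,
 * macvlan) would typically call netif_stacked_transfer_operstate() from its
 * netdevice notifier so the upper device mirrors the lower device's carrier
 * and dormant state.  The notifier below is a made-up example; the notifier
 * chain runs with the RTNL lock held, so netdev_master_upper_dev_get() is
 * safe to use here.
 */
static int example_stacked_event(struct notifier_block *nb,
                                 unsigned long event, void *ptr)
{
        struct net_device *lower = netdev_notifier_info_to_dev(ptr);
        struct net_device *upper = netdev_master_upper_dev_get(lower);

        if (upper && event == NETDEV_CHANGE)
                netif_stacked_transfer_operstate(lower, upper);
        return NOTIFY_DONE;
}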
6251 
6252 #ifdef CONFIG_SYSFS
6253 static int netif_alloc_rx_queues(struct net_device *dev)
6254 {
6255         unsigned int i, count = dev->num_rx_queues;
6256         struct netdev_rx_queue *rx;
6257         size_t sz = count * sizeof(*rx);
6258 
6259         BUG_ON(count < 1);
6260 
6261         rx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6262         if (!rx) {
6263                 rx = vzalloc(sz);
6264                 if (!rx)
6265                         return -ENOMEM;
6266         }
6267         dev->_rx = rx;
6268 
6269         for (i = 0; i < count; i++)
6270                 rx[i].dev = dev;
6271         return 0;
6272 }
6273 #endif
6274 
6275 static void netdev_init_one_queue(struct net_device *dev,
6276                                   struct netdev_queue *queue, void *_unused)
6277 {
6278         /* Initialize queue lock */
6279         spin_lock_init(&queue->_xmit_lock);
6280         netdev_set_xmit_lockdep_class(&queue->_xmit_lock, dev->type);
6281         queue->xmit_lock_owner = -1;
6282         netdev_queue_numa_node_write(queue, NUMA_NO_NODE);
6283         queue->dev = dev;
6284 #ifdef CONFIG_BQL
6285         dql_init(&queue->dql, HZ);
6286 #endif
6287 }
6288 
6289 static void netif_free_tx_queues(struct net_device *dev)
6290 {
6291         kvfree(dev->_tx);
6292 }
6293 
6294 static int netif_alloc_netdev_queues(struct net_device *dev)
6295 {
6296         unsigned int count = dev->num_tx_queues;
6297         struct netdev_queue *tx;
6298         size_t sz = count * sizeof(*tx);
6299 
6300         BUG_ON(count < 1 || count > 0xffff);
6301 
6302         tx = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6303         if (!tx) {
6304                 tx = vzalloc(sz);
6305                 if (!tx)
6306                         return -ENOMEM;
6307         }
6308         dev->_tx = tx;
6309 
6310         netdev_for_each_tx_queue(dev, netdev_init_one_queue, NULL);
6311         spin_lock_init(&dev->tx_global_lock);
6312 
6313         return 0;
6314 }
6315 
6316 /**
6317  *      register_netdevice      - register a network device
6318  *      @dev: device to register
6319  *
6320  *      Take a completed network device structure and add it to the kernel
6321  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6322  *      chain. 0 is returned on success. A negative errno code is returned
6323  *      on a failure to set up the device, or if the name is a duplicate.
6324  *
6325  *      Callers must hold the rtnl semaphore. You may want
6326  *      register_netdev() instead of this.
6327  *
6328  *      BUGS:
6329  *      The locking appears insufficient to guarantee two parallel registers
6330  *      will not get the same name.
6331  */
6332 
6333 int register_netdevice(struct net_device *dev)
6334 {
6335         int ret;
6336         struct net *net = dev_net(dev);
6337 
6338         BUG_ON(dev_boot_phase);
6339         ASSERT_RTNL();
6340 
6341         might_sleep();
6342 
6343         /* When net_devices are persistent, this will be fatal. */
6344         BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
6345         BUG_ON(!net);
6346 
6347         spin_lock_init(&dev->addr_list_lock);
6348         netdev_set_addr_lockdep_class(dev);
6349 
6350         ret = dev_get_valid_name(net, dev, dev->name);
6351         if (ret < 0)
6352                 goto out;
6353 
6354         /* Init, if this function is available */
6355         if (dev->netdev_ops->ndo_init) {
6356                 ret = dev->netdev_ops->ndo_init(dev);
6357                 if (ret) {
6358                         if (ret > 0)
6359                                 ret = -EIO;
6360                         goto out;
6361                 }
6362         }
6363 
6364         if (((dev->hw_features | dev->features) &
6365              NETIF_F_HW_VLAN_CTAG_FILTER) &&
6366             (!dev->netdev_ops->ndo_vlan_rx_add_vid ||
6367              !dev->netdev_ops->ndo_vlan_rx_kill_vid)) {
6368                 netdev_WARN(dev, "Buggy VLAN acceleration in driver!\n");
6369                 ret = -EINVAL;
6370                 goto err_uninit;
6371         }
6372 
6373         ret = -EBUSY;
6374         if (!dev->ifindex)
6375                 dev->ifindex = dev_new_index(net);
6376         else if (__dev_get_by_index(net, dev->ifindex))
6377                 goto err_uninit;
6378 
6379         /* Transfer changeable features to wanted_features and enable
6380          * software offloads (GSO and GRO).
6381          */
6382         dev->hw_features |= NETIF_F_SOFT_FEATURES;
6383         dev->features |= NETIF_F_SOFT_FEATURES;
6384         dev->wanted_features = dev->features & dev->hw_features;
6385 
6386         if (!(dev->flags & IFF_LOOPBACK)) {
6387                 dev->hw_features |= NETIF_F_NOCACHE_COPY;
6388         }
6389 
6390         /* Make NETIF_F_HIGHDMA inheritable to VLAN devices.
6391          */
6392         dev->vlan_features |= NETIF_F_HIGHDMA;
6393 
6394         /* Make NETIF_F_SG inheritable to tunnel devices.
6395          */
6396         dev->hw_enc_features |= NETIF_F_SG;
6397 
6398         /* Make NETIF_F_SG inheritable to MPLS.
6399          */
6400         dev->mpls_features |= NETIF_F_SG;
6401 
6402         ret = call_netdevice_notifiers(NETDEV_POST_INIT, dev);
6403         ret = notifier_to_errno(ret);
6404         if (ret)
6405                 goto err_uninit;
6406 
6407         ret = netdev_register_kobject(dev);
6408         if (ret)
6409                 goto err_uninit;
6410         dev->reg_state = NETREG_REGISTERED;
6411 
6412         __netdev_update_features(dev);
6413 
6414         /*
6415          *      Default initial state at registration is that the
6416          *      device is present.
6417          */
6418 
6419         set_bit(__LINK_STATE_PRESENT, &dev->state);
6420 
6421         linkwatch_init_dev(dev);
6422 
6423         dev_init_scheduler(dev);
6424         dev_hold(dev);
6425         list_netdevice(dev);
6426         add_device_randomness(dev->dev_addr, dev->addr_len);
6427 
6428         /* If the device has permanent device address, driver should
6429          * set dev_addr and also addr_assign_type should be set to
6430          * NET_ADDR_PERM (default value).
6431          */
6432         if (dev->addr_assign_type == NET_ADDR_PERM)
6433                 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
6434 
6435         /* Notify protocols, that a new device appeared. */
6436         ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
6437         ret = notifier_to_errno(ret);
6438         if (ret) {
6439                 rollback_registered(dev);
6440                 dev->reg_state = NETREG_UNREGISTERED;
6441         }
6442         /*
6443          *      Prevent userspace races by waiting until the network
6444          *      device is fully setup before sending notifications.
6445          */
6446         if (!dev->rtnl_link_ops ||
6447             dev->rtnl_link_state == RTNL_LINK_INITIALIZED)
6448                 rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
6449 
6450 out:
6451         return ret;
6452 
6453 err_uninit:
6454         if (dev->netdev_ops->ndo_uninit)
6455                 dev->netdev_ops->ndo_uninit(dev);
6456         goto out;
6457 }
6458 EXPORT_SYMBOL(register_netdevice);
6459 
6460 /**
6461  *      init_dummy_netdev       - init a dummy network device for NAPI
6462  *      @dev: device to init
6463  *
6464  *      This takes a network device structure and initializes the minimum
6465  *      number of fields so it can be used to schedule NAPI polls without
6466  *      registering a full-blown interface. This is to be used by drivers
6467  *      that need to tie several hardware interfaces to a single NAPI
6468  *      poll scheduler due to HW limitations.
6469  */
6470 int init_dummy_netdev(struct net_device *dev)
6471 {
6472         /* Clear everything. Note we don't initialize spinlocks
6473          * as they aren't supposed to be taken by any of the
6474          * NAPI code and this dummy netdev is supposed to be
6475          * only ever used for NAPI polls
6476          */
6477         memset(dev, 0, sizeof(struct net_device));
6478 
6479         /* make sure we BUG if trying to hit standard
6480          * register/unregister code path
6481          */
6482         dev->reg_state = NETREG_DUMMY;
6483 
6484         /* NAPI wants this */
6485         INIT_LIST_HEAD(&dev->napi_list);
6486 
6487         /* a dummy interface is started by default */
6488         set_bit(__LINK_STATE_PRESENT, &dev->state);
6489         set_bit(__LINK_STATE_START, &dev->state);
6490 
6491         /* Note : We don't allocate pcpu_refcnt for dummy devices,
6492          * because users of this 'device' don't need to change
6493          * its refcount.
6494          */
6495 
6496         return 0;
6497 }
6498 EXPORT_SYMBOL_GPL(init_dummy_netdev);
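
/*
 * Illustrative sketch (not part of dev.c): a driver that multiplexes several
 * hardware channels behind one interrupt can hang its NAPI contexts off a
 * dummy net_device instead of a registered one.  struct example_chan and the
 * poll callback parameter are assumptions made up for this example.
 */
struct example_chan {
        struct net_device napi_dev;     /* dummy device, never registered */
        struct napi_struct napi;
};

static void example_chan_init(struct example_chan *chan,
                              int (*poll)(struct napi_struct *, int))
{
        init_dummy_netdev(&chan->napi_dev);
        netif_napi_add(&chan->napi_dev, &chan->napi, poll, NAPI_POLL_WEIGHT);
        napi_enable(&chan->napi);
}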
6499 
6500 
6501 /**
6502  *      register_netdev - register a network device
6503  *      @dev: device to register
6504  *
6505  *      Take a completed network device structure and add it to the kernel
6506  *      interfaces. A %NETDEV_REGISTER message is sent to the netdev notifier
6507  *      chain. 0 is returned on success. A negative errno code is returned
6508  *      on a failure to set up the device, or if the name is a duplicate.
6509  *
6510  *      This is a wrapper around register_netdevice that takes the rtnl semaphore
6511  *      and expands the device name if you passed a format string to
6512  *      alloc_netdev.
6513  */
6514 int register_netdev(struct net_device *dev)
6515 {
6516         int err;
6517 
6518         rtnl_lock();
6519         err = register_netdevice(dev);
6520         rtnl_unlock();
6521         return err;
6522 }
6523 EXPORT_SYMBOL(register_netdev);
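
/*
 * Illustrative sketch (not part of dev.c): the usual allocate/register
 * sequence in a driver probe path.  struct example_nic is a made-up private
 * structure; alloc_etherdev(), register_netdev() and free_netdev() are the
 * real APIs being demonstrated.
 */
struct example_nic {
        int link_up;                    /* placeholder private state */
};

static struct net_device *example_probe(void)
{
        struct net_device *dev;
        int err;

        dev = alloc_etherdev(sizeof(struct example_nic));
        if (!dev)
                return NULL;

        /* dev->netdev_ops, ethtool_ops, MAC address etc. would be set here. */

        err = register_netdev(dev);     /* takes the RTNL lock internally */
        if (err) {
                free_netdev(dev);
                return NULL;
        }
        return dev;
}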
6524 
6525 int netdev_refcnt_read(const struct net_device *dev)
6526 {
6527         int i, refcnt = 0;
6528 
6529         for_each_possible_cpu(i)
6530                 refcnt += *per_cpu_ptr(dev->pcpu_refcnt, i);
6531         return refcnt;
6532 }
6533 EXPORT_SYMBOL(netdev_refcnt_read);
6534 
6535 /**
6536  * netdev_wait_allrefs - wait until all references are gone.
6537  * @dev: target net_device
6538  *
6539  * This is called when unregistering network devices.
6540  *
6541  * Any protocol or device that holds a reference should register
6542  * for netdevice notification, and cleanup and put back the
6543  * reference if they receive an UNREGISTER event.
6544  * We can get stuck here if buggy protocols don't correctly
6545  * call dev_put.
6546  */
6547 static void netdev_wait_allrefs(struct net_device *dev)
6548 {
6549         unsigned long rebroadcast_time, warning_time;
6550         int refcnt;
6551 
6552         linkwatch_forget_dev(dev);
6553 
6554         rebroadcast_time = warning_time = jiffies;
6555         refcnt = netdev_refcnt_read(dev);
6556 
6557         while (refcnt != 0) {
6558                 if (time_after(jiffies, rebroadcast_time + 1 * HZ)) {
6559                         rtnl_lock();
6560 
6561                         /* Rebroadcast unregister notification */
6562                         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
6563 
6564                         __rtnl_unlock();
6565                         rcu_barrier();
6566                         rtnl_lock();
6567 
6568                         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6569                         if (test_bit(__LINK_STATE_LINKWATCH_PENDING,
6570                                      &dev->state)) {
6571                                 /* We must not have linkwatch events
6572                                  * pending on unregister. If this
6573                                  * happens, we simply run the queue
6574                                  * unscheduled, resulting in a noop
6575                                  * for this device.
6576                                  */
6577                                 linkwatch_run_queue();
6578                         }
6579 
6580                         __rtnl_unlock();
6581 
6582                         rebroadcast_time = jiffies;
6583                 }
6584 
6585                 msleep(250);
6586 
6587                 refcnt = netdev_refcnt_read(dev);
6588 
6589                 if (time_after(jiffies, warning_time + 10 * HZ)) {
6590                         pr_emerg("unregister_netdevice: waiting for %s to become free. Usage count = %d\n",
6591                                  dev->name, refcnt);
6592                         warning_time = jiffies;
6593                 }
6594         }
6595 }
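
/*
 * Illustrative sketch (not part of dev.c): the kind of cleanup the comment
 * above expects from reference holders.  A subsystem that did dev_hold() on
 * a device drops that reference from its NETDEV_UNREGISTER handler so the
 * refcount can reach zero.  example_find_state() and example_detach_state()
 * are hypothetical helpers for this sketch.
 */
static int example_refholder_event(struct notifier_block *nb,
                                   unsigned long event, void *ptr)
{
        struct net_device *dev = netdev_notifier_info_to_dev(ptr);
        struct example_state *st = example_find_state(dev);    /* hypothetical */

        if (event == NETDEV_UNREGISTER && st) {
                example_detach_state(st);       /* hypothetical teardown */
                dev_put(dev);                   /* drop the reference we held */
        }
        return NOTIFY_DONE;
}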
6596 
6597 /* The sequence is:
6598  *
6599  *      rtnl_lock();
6600  *      ...
6601  *      register_netdevice(x1);
6602  *      register_netdevice(x2);
6603  *      ...
6604  *      unregister_netdevice(y1);
6605  *      unregister_netdevice(y2);
6606  *      ...
6607  *      rtnl_unlock();
6608  *      free_netdev(y1);
6609  *      free_netdev(y2);
6610  *
6611  * We are invoked by rtnl_unlock().
6612  * This allows us to deal with problems:
6613  * 1) We can delete sysfs objects which invoke hotplug
6614  *    without deadlocking with linkwatch via keventd.
6615  * 2) Since we run with the RTNL semaphore not held, we can sleep
6616  *    safely in order to wait for the netdev refcnt to drop to zero.
6617  *
6618  * We must not return until all unregister events added during
6619  * the interval the lock was held have been completed.
6620  */
6621 void netdev_run_todo(void)
6622 {
6623         struct list_head list;
6624 
6625         /* Snapshot list, allow later requests */
6626         list_replace_init(&net_todo_list, &list);
6627 
6628         __rtnl_unlock();
6629 
6630 
6631         /* Wait for rcu callbacks to finish before next phase */
6632         if (!list_empty(&list))
6633                 rcu_barrier();
6634 
6635         while (!list_empty(&list)) {
6636                 struct net_device *dev
6637                         = list_first_entry(&list, struct net_device, todo_list);
6638                 list_del(&dev->todo_list);
6639 
6640                 rtnl_lock();
6641                 call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
6642                 __rtnl_unlock();
6643 
6644                 if (unlikely(dev->reg_state != NETREG_UNREGISTERING)) {
6645                         pr_err("network todo '%s' but state %d\n",
6646                                dev->name, dev->reg_state);
6647                         dump_stack();
6648                         continue;
6649                 }
6650 
6651                 dev->reg_state = NETREG_UNREGISTERED;
6652 
6653                 on_each_cpu(flush_backlog, dev, 1);
6654 
6655                 netdev_wait_allrefs(dev);
6656 
6657                 /* paranoia */
6658                 BUG_ON(netdev_refcnt_read(dev));
6659                 BUG_ON(!list_empty(&dev->ptype_all));
6660                 BUG_ON(!list_empty(&dev->ptype_specific));
6661                 WARN_ON(rcu_access_pointer(dev->ip_ptr));
6662                 WARN_ON(rcu_access_pointer(dev->ip6_ptr));
6663                 WARN_ON(dev->dn_ptr);
6664 
6665                 if (dev->destructor)
6666                         dev->destructor(dev);
6667 
6668                 /* Report a network device has been unregistered */
6669                 rtnl_lock();
6670                 dev_net(dev)->dev_unreg_count--;
6671                 __rtnl_unlock();
6672                 wake_up(&netdev_unregistering_wq);
6673 
6674                 /* Free network device */
6675                 kobject_put(&dev->dev.kobj);
6676         }
6677 }
6678 
6679 /* Convert net_device_stats to rtnl_link_stats64.  They have the same
6680  * fields in the same order, with only the type differing.
6681  */
6682 void netdev_stats_to_stats64(struct rtnl_link_stats64 *stats64,
6683                              const struct net_device_stats *netdev_stats)
6684 {
6685 #if BITS_PER_LONG == 64
6686         BUILD_BUG_ON(sizeof(*stats64) != sizeof(*netdev_stats));
6687         memcpy(stats64, netdev_stats, sizeof(*stats64));
6688 #else
6689         size_t i, n = sizeof(*stats64) / sizeof(u64);
6690         const unsigned long *src = (const unsigned long *)netdev_stats;
6691         u64 *dst = (u64 *)stats64;
6692 
6693         BUILD_BUG_ON(sizeof(*netdev_stats) / sizeof(unsigned long) !=
6694                      sizeof(*stats64) / sizeof(u64));
6695         for (i = 0; i < n; i++)
6696                 dst[i] = src[i];
6697 #endif
6698 }
6699 EXPORT_SYMBOL(netdev_stats_to_stats64);
6700 
6701 /**
6702  *      dev_get_stats   - get network device statistics
6703  *      @dev: device to get statistics from
6704  *      @storage: place to store stats
6705  *
6706  *      Get network statistics from device. Return @storage.
6707  *      The device driver may provide its own method by setting
6708  *      dev->netdev_ops->get_stats64 or dev->netdev_ops->get_stats;
6709  *      otherwise the internal statistics structure is used.
6710  */
6711 struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
6712                                         struct rtnl_link_stats64 *storage)
6713 {
6714         const struct net_device_ops *ops = dev->netdev_ops;
6715 
6716         if (ops->ndo_get_stats64) {
6717                 memset(storage, 0, sizeof(*storage));
6718                 ops->ndo_get_stats64(dev, storage);
6719         } else if (ops->ndo_get_stats) {
6720                 netdev_stats_to_stats64(storage, ops->ndo_get_stats(dev));
6721         } else {
6722                 netdev_stats_to_stats64(storage, &dev->stats);
6723         }
6724         storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
6725         storage->tx_dropped += atomic_long_read(&dev->tx_dropped);
6726         return storage;
6727 }
6728 EXPORT_SYMBOL(dev_get_stats);
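
/*
 * Illustrative sketch (not part of dev.c): a driver-side ndo_get_stats64()
 * implementation of the kind dev_get_stats() calls above.  The private
 * counter layout is an assumption made up for this example.
 */
struct example_counters {
        u64 rx_packets, rx_bytes;
        u64 tx_packets, tx_bytes;
};

static struct rtnl_link_stats64 *
example_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *storage)
{
        const struct example_counters *c = netdev_priv(dev);

        /* dev_get_stats() has already zeroed *storage before calling us. */
        storage->rx_packets = c->rx_packets;
        storage->rx_bytes   = c->rx_bytes;
        storage->tx_packets = c->tx_packets;
        storage->tx_bytes   = c->tx_bytes;
        return storage;
}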
6729 
6730 struct netdev_queue *dev_ingress_queue_create(struct net_device *dev)
6731 {
6732         struct netdev_queue *queue = dev_ingress_queue(dev);
6733 
6734 #ifdef CONFIG_NET_CLS_ACT
6735         if (queue)
6736                 return queue;
6737         queue = kzalloc(sizeof(*queue), GFP_KERNEL);
6738         if (!queue)
6739                 return NULL;
6740         netdev_init_one_queue(dev, queue, NULL);
6741         RCU_INIT_POINTER(queue->qdisc, &noop_qdisc);
6742         queue->qdisc_sleeping = &noop_qdisc;
6743         rcu_assign_pointer(dev->ingress_queue, queue);
6744 #endif
6745         return queue;
6746 }
6747 
6748 static const struct ethtool_ops default_ethtool_ops;
6749 
6750 void netdev_set_default_ethtool_ops(struct net_device *dev,
6751                                     const struct ethtool_ops *ops)
6752 {
6753         if (dev->ethtool_ops == &default_ethtool_ops)
6754                 dev->ethtool_ops = ops;
6755 }
6756 EXPORT_SYMBOL_GPL(netdev_set_default_ethtool_ops);
6757 
6758 void netdev_freemem(struct net_device *dev)
6759 {
6760         char *addr = (char *)dev - dev->padded;
6761 
6762         kvfree(addr);
6763 }
6764 
6765 /**
6766  *      alloc_netdev_mqs - allocate network device
6767  *      @sizeof_priv:           size of private data to allocate space for
6768  *      @name:                  device name format string
6769  *      @name_assign_type:      origin of device name
6770  *      @setup:                 callback to initialize device
6771  *      @txqs:                  the number of TX subqueues to allocate
6772  *      @rxqs:                  the number of RX subqueues to allocate
6773  *
6774  *      Allocates a struct net_device with private data area for driver use
6775  *      and performs basic initialization.  Also allocates subqueue structs
6776  *      for each queue on the device.
6777  */
6778 struct net_device *alloc_netdev_mqs(int sizeof_priv, const char *name,
6779                 unsigned char name_assign_type,
6780                 void (*setup)(struct net_device *),
6781                 unsigned int txqs, unsigned int rxqs)
6782 {
6783         struct net_device *dev;
6784         size_t alloc_size;
6785         struct net_device *p;
6786 
6787         BUG_ON(strlen(name) >= sizeof(dev->name));
6788 
6789         if (txqs < 1) {
6790                 pr_err("alloc_netdev: Unable to allocate device with zero queues\n");
6791                 return NULL;
6792         }
6793 
6794 #ifdef CONFIG_SYSFS
6795         if (rxqs < 1) {
6796                 pr_err("alloc_netdev: Unable to allocate device with zero RX queues\n");
6797                 return NULL;
6798         }
6799 #endif
6800 
6801         alloc_size = sizeof(struct net_device);
6802         if (sizeof_priv) {
6803                 /* ensure 32-byte alignment of private area */
6804                 alloc_size = ALIGN(alloc_size, NETDEV_ALIGN);
6805                 alloc_size += sizeof_priv;
6806         }
6807         /* ensure 32-byte alignment of whole construct */
6808         alloc_size += NETDEV_ALIGN - 1;
6809 
6810         p = kzalloc(alloc_size, GFP_KERNEL | __GFP_NOWARN | __GFP_REPEAT);
6811         if (!p)
6812                 p = vzalloc(alloc_size);
6813         if (!p)
6814                 return NULL;
6815 
6816         dev = PTR_ALIGN(p, NETDEV_ALIGN);
6817         dev->padded = (char *)dev - (char *)p;
6818 
6819         dev->pcpu_refcnt = alloc_percpu(int);
6820         if (!dev->pcpu_refcnt)
6821                 goto free_dev;
6822 
6823         if (dev_addr_init(dev))
6824                 goto free_pcpu;
6825 
6826         dev_mc_init(dev);
6827         dev_uc_init(dev);
6828 
6829         dev_net_set(dev, &init_net);
6830 
6831         dev->gso_max_size = GSO_MAX_SIZE;
6832         dev->gso_max_segs = GSO_MAX_SEGS;
6833         dev->gso_min_segs = 0;
6834 
6835         INIT_LIST_HEAD(&dev->napi_list);
6836         INIT_LIST_HEAD(&dev->unreg_list);
6837         INIT_LIST_HEAD(&dev->close_list);
6838         INIT_LIST_HEAD(&dev->link_watch_list);
6839         INIT_LIST_HEAD(&dev->adj_list.upper);
6840         INIT_LIST_HEAD(&dev->adj_list.lower);
6841         INIT_LIST_HEAD(&dev->all_adj_list.upper);
6842         INIT_LIST_HEAD(&dev->all_adj_list.lower);
6843         INIT_LIST_HEAD(&dev->ptype_all);
6844         INIT_LIST_HEAD(&dev->ptype_specific);
6845         dev->priv_flags = IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM;
6846         setup(dev);
6847 
6848         dev->num_tx_queues = txqs;
6849         dev->real_num_tx_queues = txqs;
6850         if (netif_alloc_netdev_queues(dev))
6851                 goto free_all;
6852 
6853 #ifdef CONFIG_SYSFS
6854         dev->num_rx_queues = rxqs;
6855         dev->real_num_rx_queues = rxqs;
6856         if (netif_alloc_rx_queues(dev))
6857                 goto free_all;
6858 #endif
6859 
6860         strcpy(dev->name, name);
6861         dev->name_assign_type = name_assign_type;
6862         dev->group = INIT_NETDEV_GROUP;
6863         if (!dev->ethtool_ops)
6864                 dev->ethtool_ops = &default_ethtool_ops;
6865         return dev;
6866 
6867 free_all:
6868         free_netdev(dev);
6869         return NULL;
6870 
6871 free_pcpu:
6872         free_percpu(dev->pcpu_refcnt);
6873 free_dev:
6874         netdev_freemem(dev);
6875         return NULL;
6876 }
6877 EXPORT_SYMBOL(alloc_netdev_mqs);
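
/*
 * Illustrative sketch (not part of dev.c): allocating a multi-queue
 * Ethernet-style device directly with alloc_netdev_mqs().  The queue counts
 * and name template are assumptions for this example; ether_setup() is the
 * standard setup callback for Ethernet-like devices.
 */
static struct net_device *example_alloc_mq(void)
{
        /* No private area, 8 TX queues, 8 RX queues, kernel-chosen "example%d" name. */
        return alloc_netdev_mqs(0, "example%d", NET_NAME_ENUM, ether_setup, 8, 8);
}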
6878 
6879 /**
6880  *      free_netdev - free network device
6881  *      @dev: device
6882  *
6883  *      This function does the last stage of destroying an allocated device
6884  *      interface. The reference to the device object is released.
6885  *      If this is the last reference then it will be freed.
6886  */
6887 void free_netdev(struct net_device *dev)
6888 {
6889         struct napi_struct *p, *n;
6890 
6891         netif_free_tx_queues(dev);
6892 #ifdef CONFIG_SYSFS
6893         kvfree(dev->_rx);
6894 #endif
6895 
6896         kfree(rcu_dereference_protected(dev->ingress_queue, 1));
6897 
6898         /* Flush device addresses */
6899         dev_addr_flush(dev);
6900 
6901         list_for_each_entry_safe(p, n, &dev->napi_list, dev_list)
6902                 netif_napi_del(p);
6903 
6904         free_percpu(dev->pcpu_refcnt);
6905         dev->pcpu_refcnt = NULL;
6906 
6907         /*  Compatibility with error handling in drivers */
6908         if (dev->reg_state == NETREG_UNINITIALIZED) {
6909                 netdev_freemem(dev);
6910                 return;
6911         }
6912 
6913         BUG_ON(dev->reg_state != NETREG_UNREGISTERED);
6914         dev->reg_state = NETREG_RELEASED;
6915 
6916         /* will free via device release */
6917         put_device(&dev->dev);
6918 }
6919 EXPORT_SYMBOL(free_netdev);
6920 
6921 /**
6922  *      synchronize_net -  Synchronize with packet receive processing
6923  *
6924  *      Wait for packets currently being received to be done.
6925  *      Does not block later packets from starting.
6926  */
6927 void synchronize_net(void)
6928 {
6929         might_sleep();
6930         if (rtnl_is_locked())
6931                 synchronize_rcu_expedited();
6932         else
6933                 synchronize_rcu();
6934 }
6935 EXPORT_SYMBOL(synchronize_net);
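
/*
 * Illustrative sketch (not part of dev.c): a typical use of synchronize_net()
 * is to wait out readers in the receive path after unpublishing an RCU
 * pointer, before freeing what it pointed to.  struct example_cfg and
 * struct example_dev are assumptions made up for this example; the caller is
 * assumed to hold the RTNL lock.
 */
struct example_cfg {
        int rate_limit;                 /* placeholder configuration */
};

struct example_dev {
        struct example_cfg __rcu *cfg;  /* read by the RX path under RCU */
};

static void example_replace_cfg(struct example_dev *ed,
                                struct example_cfg *new_cfg)
{
        struct example_cfg *old = rtnl_dereference(ed->cfg);

        rcu_assign_pointer(ed->cfg, new_cfg);
        synchronize_net();              /* no RX softirq can still see 'old' */
        kfree(old);
}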
6936 
6937 /**
6938  *      unregister_netdevice_queue - remove device from the kernel
6939  *      @dev: device
6940  *      @head: list
6941  *
6942  *      This function shuts down a device interface and removes it
6943  *      from the kernel tables.
6944  *      If head is not NULL, the device is queued to be unregistered later.
6945  *
6946  *      Callers must hold the rtnl semaphore.  You may want
6947  *      unregister_netdev() instead of this.
6948  */
6949 
6950 void unregister_netdevice_queue(struct net_device *dev, struct list_head *head)
6951 {
6952         ASSERT_RTNL();
6953 
6954         if (head) {
6955                 list_move_tail(&dev->unreg_list, head);
6956         } else {
6957                 rollback_registered(dev);
6958                 /* Finish processing unregister after unlock */
6959                 net_set_todo(dev);
6960         }
6961 }
6962 EXPORT_SYMBOL(unregister_netdevice_queue);
6963 
6964 /**
6965  *      unregister_netdevice_many - unregister many devices
6966  *      @head: list of devices
6967  *
6968  *  Note: As most callers use a stack-allocated list_head,
6969  *  we force a list_del() to make sure the stack won't be corrupted later.
6970  */
6971 void unregister_netdevice_many(struct list_head *head)
6972 {
6973         struct net_device *dev;
6974 
6975         if (!list_empty(head)) {
6976                 rollback_registered_many(head);
6977                 list_for_each_entry(dev, head, unreg_list)
6978                         net_set_todo(dev);
6979                 list_del(head);
6980         }
6981 }
6982 EXPORT_SYMBOL(unregister_netdevice_many);
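
/*
 * Illustrative sketch (not part of dev.c): batching several unregisters in
 * one RTNL section, the same pattern default_device_exit_batch() uses below.
 * The list head lives on the caller's stack, which is exactly why
 * unregister_netdevice_many() finishes with a list_del() on it.  The
 * "example" name filter is a stand-in for however a driver tracks its own
 * devices.
 */
static void example_destroy_all(struct net *net)
{
        struct net_device *dev;
        LIST_HEAD(kill_list);

        rtnl_lock();
        for_each_netdev(net, dev) {
                if (strncmp(dev->name, "example", 7) == 0)
                        unregister_netdevice_queue(dev, &kill_list);
        }
        unregister_netdevice_many(&kill_list);
        rtnl_unlock();
}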
6983 
6984 /**
6985  *      unregister_netdev - remove device from the kernel
6986  *      @dev: device
6987  *
6988  *      This function shuts down a device interface and removes it
6989  *      from the kernel tables.
6990  *
6991  *      This is just a wrapper for unregister_netdevice that takes
6992  *      the rtnl semaphore.  In general you want to use this and not
6993  *      unregister_netdevice.
6994  */
6995 void unregister_netdev(struct net_device *dev)
6996 {
6997         rtnl_lock();
6998         unregister_netdevice(dev);
6999         rtnl_unlock();
7000 }
7001 EXPORT_SYMBOL(unregister_netdev);
7002 
7003 /**
7004  *      dev_change_net_namespace - move device to a different network namespace
7005  *      @dev: device
7006  *      @net: network namespace
7007  *      @pat: If not NULL name pattern to try if the current device name
7008  *            is already taken in the destination network namespace.
7009  *
7010  *      This function shuts down a device interface and moves it
7011  *      to a new network namespace. On success 0 is returned, on
7012  *      a failure a negative errno code is returned.
7013  *
7014  *      Callers must hold the rtnl semaphore.
7015  */
7016 
7017 int dev_change_net_namespace(struct net_device *dev, struct net *net, const char *pat)
7018 {
7019         int err;
7020 
7021         ASSERT_RTNL();
7022 
7023         /* Don't allow namespace local devices to be moved. */
7024         err = -EINVAL;
7025         if (dev->features & NETIF_F_NETNS_LOCAL)
7026                 goto out;
7027 
7028         /* Ensure the device has been registered */
7029         if (dev->reg_state != NETREG_REGISTERED)
7030                 goto out;
7031 
7032         /* Get out if there is nothing to do */
7033         err = 0;
7034         if (net_eq(dev_net(dev), net))
7035                 goto out;
7036 
7037         /* Pick the destination device name, and ensure
7038          * we can use it in the destination network namespace.
7039          */
7040         err = -EEXIST;
7041         if (__dev_get_by_name(net, dev->name)) {
7042                 /* We get here if we can't use the current device name */
7043                 if (!pat)
7044                         goto out;
7045                 if (dev_get_valid_name(net, dev, pat) < 0)
7046                         goto out;
7047         }
7048 
7049         /*
7050          * And now a mini version of register_netdevice and unregister_netdevice.
7051          */
7052 
7053         /* If device is running close it first. */
7054         dev_close(dev);
7055 
7056         /* And unlink it from device chain */
7057         err = -ENODEV;
7058         unlist_netdevice(dev);
7059 
7060         synchronize_net();
7061 
7062         /* Shutdown queueing discipline. */
7063         dev_shutdown(dev);
7064 
7065         /* Notify protocols that we are about to destroy
7066            this device. They should clean all the things.
7067 
7068            Note that dev->reg_state stays at NETREG_REGISTERED.
7069            This is wanted because this way 8021q and macvlan know
7070            the device is just moving and can keep their slaves up.
7071         */
7072         call_netdevice_notifiers(NETDEV_UNREGISTER, dev);
7073         rcu_barrier();
7074         call_netdevice_notifiers(NETDEV_UNREGISTER_FINAL, dev);
7075         rtmsg_ifinfo(RTM_DELLINK, dev, ~0U, GFP_KERNEL);
7076 
7077         /*
7078          *      Flush the unicast and multicast chains
7079          */
7080         dev_uc_flush(dev);
7081         dev_mc_flush(dev);
7082 
7083         /* Send a netdev-removed uevent to the old namespace */
7084         kobject_uevent(&dev->dev.kobj, KOBJ_REMOVE);
7085         netdev_adjacent_del_links(dev);
7086 
7087         /* Actually switch the network namespace */
7088         dev_net_set(dev, net);
7089 
7090         /* If there is an ifindex conflict assign a new one */
7091         if (__dev_get_by_index(net, dev->ifindex))
7092                 dev->ifindex = dev_new_index(net);
7093 
7094         /* Send a netdev-add uevent to the new namespace */
7095         kobject_uevent(&dev->dev.kobj, KOBJ_ADD);
7096         netdev_adjacent_add_links(dev);
7097 
7098         /* Fixup kobjects */
7099         err = device_rename(&dev->dev, dev->name);
7100         WARN_ON(err);
7101 
7102         /* Add the device back in the hashes */
7103         list_netdevice(dev);
7104 
7105         /* Notify protocols, that a new device appeared. */
7106         call_netdevice_notifiers(NETDEV_REGISTER, dev);
7107 
7108         /*
7109          *      Prevent userspace races by waiting until the network
7110          *      device is fully setup before sending notifications.
7111          */
7112         rtmsg_ifinfo(RTM_NEWLINK, dev, ~0U, GFP_KERNEL);
7113 
7114         synchronize_net();
7115         err = 0;
7116 out:
7117         return err;
7118 }
7119 EXPORT_SYMBOL_GPL(dev_change_net_namespace);
7120 
7121 static int dev_cpu_callback(struct notifier_block *nfb,
7122                             unsigned long action,
7123                             void *ocpu)
7124 {
7125         struct sk_buff **list_skb;
7126         struct sk_buff *skb;
7127         unsigned int cpu, oldcpu = (unsigned long)ocpu;
7128         struct softnet_data *sd, *oldsd;
7129 
7130         if (action != CPU_DEAD && action != CPU_DEAD_FROZEN)
7131                 return NOTIFY_OK;
7132 
7133         local_irq_disable();
7134         cpu = smp_processor_id();
7135         sd = &per_cpu(softnet_data, cpu);
7136         oldsd = &per_cpu(softnet_data, oldcpu);
7137 
7138         /* Find end of our completion_queue. */
7139         list_skb = &sd->completion_queue;
7140         while (*list_skb)
7141                 list_skb = &(*list_skb)->next;
7142         /* Append completion queue from offline CPU. */
7143         *list_skb = oldsd->completion_queue;
7144         oldsd->completion_queue = NULL;
7145 
7146         /* Append output queue from offline CPU. */
7147         if (oldsd->output_queue) {
7148                 *sd->output_queue_tailp = oldsd->output_queue;
7149                 sd->output_queue_tailp = oldsd->output_queue_tailp;
7150                 oldsd->output_queue = NULL;
7151                 oldsd->output_queue_tailp = &oldsd->output_queue;
7152         }
7153         /* Append NAPI poll list from offline CPU, with one exception:
7154          * process_backlog() must be called by the CPU owning the percpu backlog.
7155          * We properly handle process_queue & input_pkt_queue later.
7156          */
7157         while (!list_empty(&oldsd->poll_list)) {
7158                 struct napi_struct *napi = list_first_entry(&oldsd->poll_list,
7159                                                             struct napi_struct,
7160                                                             poll_list);
7161 
7162                 list_del_init(&napi->poll_list);
7163                 if (napi->poll == process_backlog)
7164                         napi->state = 0;
7165                 else
7166                         ____napi_schedule(sd, napi);
7167         }
7168 
7169         raise_softirq_irqoff(NET_TX_SOFTIRQ);
7170         local_irq_enable();
7171 
7172         /* Process offline CPU's input_pkt_queue */
7173         while ((skb = __skb_dequeue(&oldsd->process_queue))) {
7174                 netif_rx_ni(skb);
7175                 input_queue_head_incr(oldsd);
7176         }
7177         while ((skb = skb_dequeue(&oldsd->input_pkt_queue))) {
7178                 netif_rx_ni(skb);
7179                 input_queue_head_incr(oldsd);
7180         }
7181 
7182         return NOTIFY_OK;
7183 }
7184 
7185 
7186 /**
7187  *      netdev_increment_features - increment feature set by one
7188  *      @all: current feature set
7189  *      @one: new feature set
7190  *      @mask: mask feature set
7191  *
7192  *      Computes a new feature set after adding a device with feature set
7193  *      @one to the master device with current feature set @all.  Will not
7194  *      enable anything that is off in @mask. Returns the new feature set.
7195  */
7196 netdev_features_t netdev_increment_features(netdev_features_t all,
7197         netdev_features_t one, netdev_features_t mask)
7198 {
7199         if (mask & NETIF_F_GEN_CSUM)
7200                 mask |= NETIF_F_ALL_CSUM;
7201         mask |= NETIF_F_VLAN_CHALLENGED;
7202 
7203         all |= one & (NETIF_F_ONE_FOR_ALL|NETIF_F_ALL_CSUM) & mask;
7204         all &= one | ~NETIF_F_ALL_FOR_ALL;
7205 
7206         /* If one device supports hw checksumming, set for all. */
7207         if (all & NETIF_F_GEN_CSUM)
7208                 all &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_GEN_CSUM);
7209 
7210         return all;
7211 }
7212 EXPORT_SYMBOL(netdev_increment_features);
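
/*
 * Illustrative sketch (not part of dev.c): a master driver (bonding, bridge,
 * team) typically starts from an "everything allowed" base and folds in each
 * slave with netdev_increment_features().  The slaves[] array, its count and
 * the mask are assumptions made up for this example.
 */
static netdev_features_t example_master_features(struct net_device **slaves,
                                                 int n_slaves,
                                                 netdev_features_t mask)
{
        netdev_features_t features = NETIF_F_ALL_FOR_ALL;
        int i;

        for (i = 0; i < n_slaves; i++)
                features = netdev_increment_features(features,
                                                     slaves[i]->features, mask);
        return features;
}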
7213 
7214 static struct hlist_head * __net_init netdev_create_hash(void)
7215 {
7216         int i;
7217         struct hlist_head *hash;
7218 
7219         hash = kmalloc(sizeof(*hash) * NETDEV_HASHENTRIES, GFP_KERNEL);
7220         if (hash != NULL)
7221                 for (i = 0; i < NETDEV_HASHENTRIES; i++)
7222                         INIT_HLIST_HEAD(&hash[i]);
7223 
7224         return hash;
7225 }
7226 
7227 /* Initialize per network namespace state */
7228 static int __net_init netdev_init(struct net *net)
7229 {
7230         if (net != &init_net)
7231                 INIT_LIST_HEAD(&net->dev_base_head);
7232 
7233         net->dev_name_head = netdev_create_hash();
7234         if (net->dev_name_head == NULL)
7235                 goto err_name;
7236 
7237         net->dev_index_head = netdev_create_hash();
7238         if (net->dev_index_head == NULL)
7239                 goto err_idx;
7240 
7241         return 0;
7242 
7243 err_idx:
7244         kfree(net->dev_name_head);
7245 err_name:
7246         return -ENOMEM;
7247 }
7248 
7249 /**
7250  *      netdev_drivername - network driver for the device
7251  *      @dev: network device
7252  *
7253  *      Determine network driver for device.
7254  */
7255 const char *netdev_drivername(const struct net_device *dev)
7256 {
7257         const struct device_driver *driver;
7258         const struct device *parent;
7259         const char *empty = "";
7260 
7261         parent = dev->dev.parent;
7262         if (!parent)
7263                 return empty;
7264 
7265         driver = parent->driver;
7266         if (driver && driver->name)
7267                 return driver->name;
7268         return empty;
7269 }
7270 
7271 static void __netdev_printk(const char *level, const struct net_device *dev,
7272                             struct va_format *vaf)
7273 {
7274         if (dev && dev->dev.parent) {
7275                 dev_printk_emit(level[1] - '0',
7276                                 dev->dev.parent,
7277                                 "%s %s %s%s: %pV",
7278                                 dev_driver_string(dev->dev.parent),
7279                                 dev_name(dev->dev.parent),
7280                                 netdev_name(dev), netdev_reg_state(dev),
7281                                 vaf);
7282         } else if (dev) {
7283                 printk("%s%s%s: %pV",
7284                        level, netdev_name(dev), netdev_reg_state(dev), vaf);
7285         } else {
7286                 printk("%s(NULL net_device): %pV", level, vaf);
7287         }
7288 }
7289 
7290 void netdev_printk(const char *level, const struct net_device *dev,
7291                    const char *format, ...)
7292 {
7293         struct va_format vaf;
7294         va_list args;
7295 
7296         va_start(args, format);
7297 
7298         vaf.fmt = format;
7299         vaf.va = &args;
7300 
7301         __netdev_printk(level, dev, &vaf);
7302 
7303         va_end(args);
7304 }
7305 EXPORT_SYMBOL(netdev_printk);
7306 
7307 #define define_netdev_printk_level(func, level)                 \
7308 void func(const struct net_device *dev, const char *fmt, ...)   \
7309 {                                                               \
7310         struct va_format vaf;                                   \
7311         va_list args;                                           \
7312                                                                 \
7313         va_start(args, fmt);                                    \
7314                                                                 \
7315         vaf.fmt = fmt;                                          \
7316         vaf.va = &args;                                         \
7317                                                                 \
7318         __netdev_printk(level, dev, &vaf);                      \
7319                                                                 \
7320         va_end(args);                                           \
7321 }                                                               \
7322 EXPORT_SYMBOL(func);
7323 
7324 define_netdev_printk_level(netdev_emerg, KERN_EMERG);
7325 define_netdev_printk_level(netdev_alert, KERN_ALERT);
7326 define_netdev_printk_level(netdev_crit, KERN_CRIT);
7327 define_netdev_printk_level(netdev_err, KERN_ERR);
7328 define_netdev_printk_level(netdev_warn, KERN_WARNING);
7329 define_netdev_printk_level(netdev_notice, KERN_NOTICE);
7330 define_netdev_printk_level(netdev_info, KERN_INFO);
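
/*
 * Illustrative sketch (not part of dev.c): the wrappers generated above are
 * used like printk() but prefix the message with the driver, bus and
 * interface names.  The link-speed reporting below is a made-up example.
 */
static void example_report_link(struct net_device *dev, bool up, int mbps)
{
        if (up)
                netdev_info(dev, "link up, %d Mbps\n", mbps);
        else
                netdev_warn(dev, "link down\n");
}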
7331 
7332 static void __net_exit netdev_exit(struct net *net)
7333 {
7334         kfree(net->dev_name_head);
7335         kfree(net->dev_index_head);
7336 }
7337 
7338 static struct pernet_operations __net_initdata netdev_net_ops = {
7339         .init = netdev_init,
7340         .exit = netdev_exit,
7341 };
7342 
7343 static void __net_exit default_device_exit(struct net *net)
7344 {
7345         struct net_device *dev, *aux;
7346         /*
7347          * Push all migratable network devices back to the
7348          * initial network namespace
7349          */
7350         rtnl_lock();
7351         for_each_netdev_safe(net, dev, aux) {
7352                 int err;
7353                 char fb_name[IFNAMSIZ];
7354 
7355                 /* Ignore unmoveable devices (i.e. loopback) */
7356                 if (dev->features & NETIF_F_NETNS_LOCAL)
7357                         continue;
7358 
7359                 /* Leave virtual devices for the generic cleanup */
7360                 if (dev->rtnl_link_ops)
7361                         continue;
7362 
7363                 /* Push remaining network devices to init_net */
7364                 snprintf(fb_name, IFNAMSIZ, "dev%d", dev->ifindex);
7365                 err = dev_change_net_namespace(dev, &init_net, fb_name);
7366                 if (err) {
7367                         pr_emerg("%s: failed to move %s to init_net: %d\n",
7368                                  __func__, dev->name, err);
7369                         BUG();
7370                 }
7371         }
7372         rtnl_unlock();
7373 }
7374 
7375 static void __net_exit rtnl_lock_unregistering(struct list_head *net_list)
7376 {
7377         /* Return with the rtnl_lock held when there are no network
7378          * devices unregistering in any network namespace in net_list.
7379          */
7380         struct net *net;
7381         bool unregistering;
7382         DEFINE_WAIT_FUNC(wait, woken_wake_function);
7383 
7384         add_wait_queue(&netdev_unregistering_wq, &wait);
7385         for (;;) {
7386                 unregistering = false;
7387                 rtnl_lock();
7388                 list_for_each_entry(net, net_list, exit_list) {
7389                         if (net->dev_unreg_count > 0) {
7390                                 unregistering = true;
7391                                 break;
7392                         }
7393                 }
7394                 if (!unregistering)
7395                         break;
7396                 __rtnl_unlock();
7397 
7398                 wait_woken(&wait, TASK_UNINTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
7399         }
7400         remove_wait_queue(&netdev_unregistering_wq, &wait);
7401 }
7402 
7403 static void __net_exit default_device_exit_batch(struct list_head *net_list)
7404 {
7405         /* At exit all network devices must be removed from a network
7406          * namespace.  Do this in the reverse order of registration.
7407          * Do this across as many network namespaces as possible to
7408          * improve batching efficiency.
7409          */
7410         struct net_device *dev;
7411         struct net *net;
7412         LIST_HEAD(dev_kill_list);
7413 
7414         /* To prevent network device cleanup code from dereferencing
7415          * loopback devices or network devices that have been freed,
7416          * wait here for all pending unregistrations to complete
7417          * before unregistering the loopback device and allowing the
7418          * network namespace to be freed.
7419          *
7420          * The netdev todo list containing all network devices
7421          * unregistrations that happen in default_device_exit_batch
7422          * will run in the rtnl_unlock() at the end of
7423          * default_device_exit_batch.
7424          */
7425         rtnl_lock_unregistering(net_list);
7426         list_for_each_entry(net, net_list, exit_list) {
7427                 for_each_netdev_reverse(net, dev) {
7428                         if (dev->rtnl_link_ops && dev->rtnl_link_ops->dellink)
7429                                 dev->rtnl_link_ops->dellink(dev, &dev_kill_list);
7430                         else
7431                                 unregister_netdevice_queue(dev, &dev_kill_list);
7432                 }
7433         }
7434         unregister_netdevice_many(&dev_kill_list);
7435         rtnl_unlock();
7436 }
7437 
7438 static struct pernet_operations __net_initdata default_device_ops = {
7439         .exit = default_device_exit,
7440         .exit_batch = default_device_exit_batch,
7441 };
7442 
7443 /*
7444  *      Initialize the DEV module. At boot time this walks the device list and
7445  *      unhooks any devices that fail to initialise (normally hardware not
7446  *      present) and leaves us with a valid list of present and active devices.
7447  *
7448  */
7449 
7450 /*
7451  *       This is called single threaded during boot, so no need
7452  *       to take the rtnl semaphore.
7453  */
7454 static int __init net_dev_init(void)
7455 {
7456         int i, rc = -ENOMEM;
7457 
7458         BUG_ON(!dev_boot_phase);
7459 
7460         if (dev_proc_init())
7461                 goto out;
7462 
7463         if (netdev_kobject_init())
7464                 goto out;
7465 
7466         INIT_LIST_HEAD(&ptype_all);
7467         for (i = 0; i < PTYPE_HASH_SIZE; i++)
7468                 INIT_LIST_HEAD(&ptype_base[i]);
7469 
7470         INIT_LIST_HEAD(&offload_base);
7471 
7472         if (register_pernet_subsys(&netdev_net_ops))
7473                 goto out;
7474 
7475         /*
7476          *      Initialise the packet receive queues.
7477          */
7478 
7479         for_each_possible_cpu(i) {
7480                 struct softnet_data *sd = &per_cpu(softnet_data, i);
7481 
7482                 skb_queue_head_init(&sd->input_pkt_queue);
7483                 skb_queue_head_init(&sd->process_queue);
7484                 INIT_LIST_HEAD(&sd->poll_list);
7485                 sd->output_queue_tailp = &sd->output_queue;
7486 #ifdef CONFIG_RPS
7487                 sd->csd.func = rps_trigger_softirq;
7488                 sd->csd.info = sd;
7489                 sd->cpu = i;
7490 #endif
7491 
7492                 sd->backlog.poll = process_backlog;
7493                 sd->backlog.weight = weight_p;
7494         }
7495 
7496         dev_boot_phase = 0;
7497 
7498         /* The loopback device is special: if any other network device
7499          * is present in a network namespace, the loopback device must
7500          * be present too. Since we now dynamically allocate and free the
7501          * loopback device, ensure this invariant is maintained by
7502          * keeping the loopback device as the first device on the
7503          * list of network devices, so that the loopback device is
7504          * the first device that appears and the last network device
7505          * that disappears.
7506          */
7507         if (register_pernet_device(&loopback_net_ops))
7508                 goto out;
7509 
7510         if (register_pernet_device(&default_device_ops))
7511                 goto out;
7512 
7513         open_softirq(NET_TX_SOFTIRQ, net_tx_action);
7514         open_softirq(NET_RX_SOFTIRQ, net_rx_action);
7515 
7516         hotcpu_notifier(dev_cpu_callback, 0);
7517         dst_init();
7518         rc = 0;
7519 out:
7520         return rc;
7521 }
7522 
7523 subsys_initcall(net_dev_init);
7524 
