
Linux/drivers/net/team/team.c

  1 /*
  2  * drivers/net/team/team.c - Network team device driver
  3  * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
  4  *
  5  * This program is free software; you can redistribute it and/or modify
  6  * it under the terms of the GNU General Public License as published by
  7  * the Free Software Foundation; either version 2 of the License, or
  8  * (at your option) any later version.
  9  */
 10 
 11 #include <linux/kernel.h>
 12 #include <linux/types.h>
 13 #include <linux/module.h>
 14 #include <linux/init.h>
 15 #include <linux/slab.h>
 16 #include <linux/rcupdate.h>
 17 #include <linux/errno.h>
 18 #include <linux/ctype.h>
 19 #include <linux/notifier.h>
 20 #include <linux/netdevice.h>
 21 #include <linux/netpoll.h>
 22 #include <linux/if_vlan.h>
 23 #include <linux/if_arp.h>
 24 #include <linux/socket.h>
 25 #include <linux/etherdevice.h>
 26 #include <linux/rtnetlink.h>
 27 #include <net/rtnetlink.h>
 28 #include <net/genetlink.h>
 29 #include <net/netlink.h>
 30 #include <net/sch_generic.h>
 31 #include <generated/utsrelease.h>
 32 #include <linux/if_team.h>
 33 
 34 #define DRV_NAME "team"
 35 
 36 
 37 /**********
 38  * Helpers
 39  **********/
 40 
 41 #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
 42 
 43 static struct team_port *team_port_get_rcu(const struct net_device *dev)
 44 {
 45         struct team_port *port = rcu_dereference(dev->rx_handler_data);
 46 
 47         return team_port_exists(dev) ? port : NULL;
 48 }
 49 
 50 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 51 {
 52         struct team_port *port = rtnl_dereference(dev->rx_handler_data);
 53 
 54         return team_port_exists(dev) ? port : NULL;
 55 }
 56 
 57 /*
 58  * Since the ability to change the device address of an open port device is
 59  * verified in team_port_add, this function can be called without checking the return value.
 60  */
 61 static int __set_port_dev_addr(struct net_device *port_dev,
 62                                const unsigned char *dev_addr)
 63 {
 64         struct sockaddr addr;
 65 
 66         memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
 67         addr.sa_family = port_dev->type;
 68         return dev_set_mac_address(port_dev, &addr);
 69 }
 70 
 71 static int team_port_set_orig_dev_addr(struct team_port *port)
 72 {
 73         return __set_port_dev_addr(port->dev, port->orig.dev_addr);
 74 }
 75 
 76 static int team_port_set_team_dev_addr(struct team *team,
 77                                        struct team_port *port)
 78 {
 79         return __set_port_dev_addr(port->dev, team->dev->dev_addr);
 80 }
 81 
 82 int team_modeop_port_enter(struct team *team, struct team_port *port)
 83 {
 84         return team_port_set_team_dev_addr(team, port);
 85 }
 86 EXPORT_SYMBOL(team_modeop_port_enter);
 87 
 88 void team_modeop_port_change_dev_addr(struct team *team,
 89                                       struct team_port *port)
 90 {
 91         team_port_set_team_dev_addr(team, port);
 92 }
 93 EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
 94 
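/*
 * Resolve the port's effective link state: use the user-forced value when
 * user link-state reporting is enabled, otherwise use the state reported
 * by the underlying port device.
 */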
 95 static void team_refresh_port_linkup(struct team_port *port)
 96 {
 97         port->linkup = port->user.linkup_enabled ? port->user.linkup :
 98                                                    port->state.linkup;
 99 }
100 
101 
102 /*******************
103  * Options handling
104  *******************/
105 
106 struct team_option_inst { /* One for each option instance */
107         struct list_head list;
108         struct list_head tmp_list;
109         struct team_option *option;
110         struct team_option_inst_info info;
111         bool changed;
112         bool removed;
113 };
114 
115 static struct team_option *__team_find_option(struct team *team,
116                                               const char *opt_name)
117 {
118         struct team_option *option;
119 
120         list_for_each_entry(option, &team->option_list, list) {
121                 if (strcmp(option->name, opt_name) == 0)
122                         return option;
123         }
124         return NULL;
125 }
126 
127 static void __team_option_inst_del(struct team_option_inst *opt_inst)
128 {
129         list_del(&opt_inst->list);
130         kfree(opt_inst);
131 }
132 
133 static void __team_option_inst_del_option(struct team *team,
134                                           struct team_option *option)
135 {
136         struct team_option_inst *opt_inst, *tmp;
137 
138         list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
139                 if (opt_inst->option == option)
140                         __team_option_inst_del(opt_inst);
141         }
142 }
143 
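/*
 * Create one option instance per array element (or a single instance for a
 * non-array option) for the given option/port pair, mark it as changed and
 * run the option's init callback if one is provided.
 */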
144 static int __team_option_inst_add(struct team *team, struct team_option *option,
145                                   struct team_port *port)
146 {
147         struct team_option_inst *opt_inst;
148         unsigned int array_size;
149         unsigned int i;
150         int err;
151 
152         array_size = option->array_size;
153         if (!array_size)
154                 array_size = 1; /* No array but still need one instance */
155 
156         for (i = 0; i < array_size; i++) {
157                 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
158                 if (!opt_inst)
159                         return -ENOMEM;
160                 opt_inst->option = option;
161                 opt_inst->info.port = port;
162                 opt_inst->info.array_index = i;
163                 opt_inst->changed = true;
164                 opt_inst->removed = false;
165                 list_add_tail(&opt_inst->list, &team->option_inst_list);
166                 if (option->init) {
167                         err = option->init(team, &opt_inst->info);
168                         if (err)
169                                 return err;
170                 }
171 
172         }
173         return 0;
174 }
175 
176 static int __team_option_inst_add_option(struct team *team,
177                                          struct team_option *option)
178 {
179         struct team_port *port;
180         int err;
181 
182         if (!option->per_port) {
183                 err = __team_option_inst_add(team, option, NULL);
184                 if (err)
185                         goto inst_del_option;
186         }
187 
188         list_for_each_entry(port, &team->port_list, list) {
189                 err = __team_option_inst_add(team, option, port);
190                 if (err)
191                         goto inst_del_option;
192         }
193         return 0;
194 
195 inst_del_option:
196         __team_option_inst_del_option(team, option);
197         return err;
198 }
199 
200 static void __team_option_inst_mark_removed_option(struct team *team,
201                                                    struct team_option *option)
202 {
203         struct team_option_inst *opt_inst;
204 
205         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
206                 if (opt_inst->option == option) {
207                         opt_inst->changed = true;
208                         opt_inst->removed = true;
209                 }
210         }
211 }
212 
213 static void __team_option_inst_del_port(struct team *team,
214                                         struct team_port *port)
215 {
216         struct team_option_inst *opt_inst, *tmp;
217 
218         list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
219                 if (opt_inst->option->per_port &&
220                     opt_inst->info.port == port)
221                         __team_option_inst_del(opt_inst);
222         }
223 }
224 
225 static int __team_option_inst_add_port(struct team *team,
226                                        struct team_port *port)
227 {
228         struct team_option *option;
229         int err;
230 
231         list_for_each_entry(option, &team->option_list, list) {
232                 if (!option->per_port)
233                         continue;
234                 err = __team_option_inst_add(team, option, port);
235                 if (err)
236                         goto inst_del_port;
237         }
238         return 0;
239 
240 inst_del_port:
241         __team_option_inst_del_port(team, port);
242         return err;
243 }
244 
245 static void __team_option_inst_mark_removed_port(struct team *team,
246                                                  struct team_port *port)
247 {
248         struct team_option_inst *opt_inst;
249 
250         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
251                 if (opt_inst->info.port == port) {
252                         opt_inst->changed = true;
253                         opt_inst->removed = true;
254                 }
255         }
256 }
257 
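/*
 * Duplicate each option descriptor, create the corresponding option
 * instances and add the copies to team->option_list. On failure, everything
 * created so far is rolled back and freed.
 */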
258 static int __team_options_register(struct team *team,
259                                    const struct team_option *option,
260                                    size_t option_count)
261 {
262         int i;
263         struct team_option **dst_opts;
264         int err;
265 
266         dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
267                            GFP_KERNEL);
268         if (!dst_opts)
269                 return -ENOMEM;
270         for (i = 0; i < option_count; i++, option++) {
271                 if (__team_find_option(team, option->name)) {
272                         err = -EEXIST;
273                         goto alloc_rollback;
274                 }
275                 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
276                 if (!dst_opts[i]) {
277                         err = -ENOMEM;
278                         goto alloc_rollback;
279                 }
280         }
281 
282         for (i = 0; i < option_count; i++) {
283                 err = __team_option_inst_add_option(team, dst_opts[i]);
284                 if (err)
285                         goto inst_rollback;
286                 list_add_tail(&dst_opts[i]->list, &team->option_list);
287         }
288 
289         kfree(dst_opts);
290         return 0;
291 
292 inst_rollback:
293         for (i--; i >= 0; i--)
294                 __team_option_inst_del_option(team, dst_opts[i]);
295 
296         i = option_count - 1;
297 alloc_rollback:
298         for (i--; i >= 0; i--)
299                 kfree(dst_opts[i]);
300 
301         kfree(dst_opts);
302         return err;
303 }
304 
305 static void __team_options_mark_removed(struct team *team,
306                                         const struct team_option *option,
307                                         size_t option_count)
308 {
309         int i;
310 
311         for (i = 0; i < option_count; i++, option++) {
312                 struct team_option *del_opt;
313 
314                 del_opt = __team_find_option(team, option->name);
315                 if (del_opt)
316                         __team_option_inst_mark_removed_option(team, del_opt);
317         }
318 }
319 
320 static void __team_options_unregister(struct team *team,
321                                       const struct team_option *option,
322                                       size_t option_count)
323 {
324         int i;
325 
326         for (i = 0; i < option_count; i++, option++) {
327                 struct team_option *del_opt;
328 
329                 del_opt = __team_find_option(team, option->name);
330                 if (del_opt) {
331                         __team_option_inst_del_option(team, del_opt);
332                         list_del(&del_opt->list);
333                         kfree(del_opt);
334                 }
335         }
336 }
337 
338 static void __team_options_change_check(struct team *team);
339 
340 int team_options_register(struct team *team,
341                           const struct team_option *option,
342                           size_t option_count)
343 {
344         int err;
345 
346         err = __team_options_register(team, option, option_count);
347         if (err)
348                 return err;
349         __team_options_change_check(team);
350         return 0;
351 }
352 EXPORT_SYMBOL(team_options_register);
353 
354 void team_options_unregister(struct team *team,
355                              const struct team_option *option,
356                              size_t option_count)
357 {
358         __team_options_mark_removed(team, option, option_count);
359         __team_options_change_check(team);
360         __team_options_unregister(team, option, option_count);
361 }
362 EXPORT_SYMBOL(team_options_unregister);
363 
364 static int team_option_get(struct team *team,
365                            struct team_option_inst *opt_inst,
366                            struct team_gsetter_ctx *ctx)
367 {
368         if (!opt_inst->option->getter)
369                 return -EOPNOTSUPP;
370         return opt_inst->option->getter(team, ctx);
371 }
372 
373 static int team_option_set(struct team *team,
374                            struct team_option_inst *opt_inst,
375                            struct team_gsetter_ctx *ctx)
376 {
377         if (!opt_inst->option->setter)
378                 return -EOPNOTSUPP;
379         return opt_inst->option->setter(team, ctx);
380 }
381 
382 void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
383 {
384         struct team_option_inst *opt_inst;
385 
386         opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
387         opt_inst->changed = true;
388 }
389 EXPORT_SYMBOL(team_option_inst_set_change);
390 
391 void team_options_change_check(struct team *team)
392 {
393         __team_options_change_check(team);
394 }
395 EXPORT_SYMBOL(team_options_change_check);
396 
397 
398 /****************
399  * Mode handling
400  ****************/
401 
402 static LIST_HEAD(mode_list);
403 static DEFINE_SPINLOCK(mode_list_lock);
404 
405 struct team_mode_item {
406         struct list_head list;
407         const struct team_mode *mode;
408 };
409 
410 static struct team_mode_item *__find_mode(const char *kind)
411 {
412         struct team_mode_item *mitem;
413 
414         list_for_each_entry(mitem, &mode_list, list) {
415                 if (strcmp(mitem->mode->kind, kind) == 0)
416                         return mitem;
417         }
418         return NULL;
419 }
420 
421 static bool is_good_mode_name(const char *name)
422 {
423         while (*name != '\0') {
424                 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
425                         return false;
426                 name++;
427         }
428         return true;
429 }
430 
431 int team_mode_register(const struct team_mode *mode)
432 {
433         int err = 0;
434         struct team_mode_item *mitem;
435 
436         if (!is_good_mode_name(mode->kind) ||
437             mode->priv_size > TEAM_MODE_PRIV_SIZE)
438                 return -EINVAL;
439 
440         mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
441         if (!mitem)
442                 return -ENOMEM;
443 
444         spin_lock(&mode_list_lock);
445         if (__find_mode(mode->kind)) {
446                 err = -EEXIST;
447                 kfree(mitem);
448                 goto unlock;
449         }
450         mitem->mode = mode;
451         list_add_tail(&mitem->list, &mode_list);
452 unlock:
453         spin_unlock(&mode_list_lock);
454         return err;
455 }
456 EXPORT_SYMBOL(team_mode_register);
457 
458 void team_mode_unregister(const struct team_mode *mode)
459 {
460         struct team_mode_item *mitem;
461 
462         spin_lock(&mode_list_lock);
463         mitem = __find_mode(mode->kind);
464         if (mitem) {
465                 list_del_init(&mitem->list);
466                 kfree(mitem);
467         }
468         spin_unlock(&mode_list_lock);
469 }
470 EXPORT_SYMBOL(team_mode_unregister);
471 
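/*
 * Look up a mode by its kind string. If it is not registered yet, try to
 * load the "team-mode-<kind>" module and look again. A reference to the
 * mode's owner module is taken on success; drop it with team_mode_put().
 */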
472 static const struct team_mode *team_mode_get(const char *kind)
473 {
474         struct team_mode_item *mitem;
475         const struct team_mode *mode = NULL;
476 
477         spin_lock(&mode_list_lock);
478         mitem = __find_mode(kind);
479         if (!mitem) {
480                 spin_unlock(&mode_list_lock);
481                 request_module("team-mode-%s", kind);
482                 spin_lock(&mode_list_lock);
483                 mitem = __find_mode(kind);
484         }
485         if (mitem) {
486                 mode = mitem->mode;
487                 if (!try_module_get(mode->owner))
488                         mode = NULL;
489         }
490 
491         spin_unlock(&mode_list_lock);
492         return mode;
493 }
494 
495 static void team_mode_put(const struct team_mode *mode)
496 {
497         module_put(mode->owner);
498 }
499 
500 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
501 {
502         dev_kfree_skb_any(skb);
503         return false;
504 }
505 
506 static rx_handler_result_t team_dummy_receive(struct team *team,
507                                               struct team_port *port,
508                                               struct sk_buff *skb)
509 {
510         return RX_HANDLER_ANOTHER;
511 }
512 
513 static const struct team_mode __team_no_mode = {
514         .kind           = "*NOMODE*",
515 };
516 
517 static bool team_is_mode_set(struct team *team)
518 {
519         return team->mode != &__team_no_mode;
520 }
521 
522 static void team_set_no_mode(struct team *team)
523 {
524         team->user_carrier_enabled = false;
525         team->mode = &__team_no_mode;
526 }
527 
528 static void team_adjust_ops(struct team *team)
529 {
530         /*
531          * To avoid checks in rx/tx skb paths, ensure here that non-null and
532          * correct ops are always set.
533          */
534 
535         if (!team->en_port_count || !team_is_mode_set(team) ||
536             !team->mode->ops->transmit)
537                 team->ops.transmit = team_dummy_transmit;
538         else
539                 team->ops.transmit = team->mode->ops->transmit;
540 
541         if (!team->en_port_count || !team_is_mode_set(team) ||
542             !team->mode->ops->receive)
543                 team->ops.receive = team_dummy_receive;
544         else
545                 team->ops.receive = team->mode->ops->receive;
546 }
547 
 548 /*
 549  * We can benefit from the fact that it is guaranteed that no port is present
 550  * at the time of a mode change. Therefore no packets are in flight, so there
 551  * is no need to set the mode operations in any special way.
 552  */
553 static int __team_change_mode(struct team *team,
554                               const struct team_mode *new_mode)
555 {
556         /* Check if mode was previously set and do cleanup if so */
557         if (team_is_mode_set(team)) {
558                 void (*exit_op)(struct team *team) = team->ops.exit;
559 
560                 /* Clear ops area so no callback is called any longer */
561                 memset(&team->ops, 0, sizeof(struct team_mode_ops));
562                 team_adjust_ops(team);
563 
564                 if (exit_op)
565                         exit_op(team);
566                 team_mode_put(team->mode);
567                 team_set_no_mode(team);
568                 /* zero private data area */
569                 memset(&team->mode_priv, 0,
570                        sizeof(struct team) - offsetof(struct team, mode_priv));
571         }
572 
573         if (!new_mode)
574                 return 0;
575 
576         if (new_mode->ops->init) {
577                 int err;
578 
579                 err = new_mode->ops->init(team);
580                 if (err)
581                         return err;
582         }
583 
584         team->mode = new_mode;
585         memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
586         team_adjust_ops(team);
587 
588         return 0;
589 }
590 
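/*
 * Change the team's mode by kind string. This is only allowed while the team
 * has no ports; the old mode (if any) is torn down and the new one initialized.
 */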
591 static int team_change_mode(struct team *team, const char *kind)
592 {
593         const struct team_mode *new_mode;
594         struct net_device *dev = team->dev;
595         int err;
596 
597         if (!list_empty(&team->port_list)) {
598                 netdev_err(dev, "No ports can be present during mode change\n");
599                 return -EBUSY;
600         }
601 
602         if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
603                 netdev_err(dev, "Unable to change to the same mode the team is in\n");
604                 return -EINVAL;
605         }
606 
607         new_mode = team_mode_get(kind);
608         if (!new_mode) {
609                 netdev_err(dev, "Mode \"%s\" not found\n", kind);
610                 return -EINVAL;
611         }
612 
613         err = __team_change_mode(team, new_mode);
614         if (err) {
615                 netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
616                 team_mode_put(new_mode);
617                 return err;
618         }
619 
620         netdev_info(dev, "Mode changed to \"%s\"\n", kind);
621         return 0;
622 }
623 
624 
625 /*********************
626  * Peers notification
627  *********************/
628 
629 static void team_notify_peers_work(struct work_struct *work)
630 {
631         struct team *team;
632 
633         team = container_of(work, struct team, notify_peers.dw.work);
634 
635         if (!rtnl_trylock()) {
636                 schedule_delayed_work(&team->notify_peers.dw, 0);
637                 return;
638         }
639         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
640         rtnl_unlock();
641         if (!atomic_dec_and_test(&team->notify_peers.count_pending))
642                 schedule_delayed_work(&team->notify_peers.dw,
643                                       msecs_to_jiffies(team->notify_peers.interval));
644 }
645 
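/*
 * Schedule a burst of NETDEV_NOTIFY_PEERS notifications for the team device
 * (typically causing gratuitous ARP/NA to be sent), repeated
 * notify_peers.count times with notify_peers.interval milliseconds between
 * them; the actual notifier calls are made from the delayed work above.
 */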
646 static void team_notify_peers(struct team *team)
647 {
648         if (!team->notify_peers.count || !netif_running(team->dev))
649                 return;
650         atomic_set(&team->notify_peers.count_pending, team->notify_peers.count);
651         schedule_delayed_work(&team->notify_peers.dw, 0);
652 }
653 
654 static void team_notify_peers_init(struct team *team)
655 {
656         INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
657 }
658 
659 static void team_notify_peers_fini(struct team *team)
660 {
661         cancel_delayed_work_sync(&team->notify_peers.dw);
662 }
663 
664 
665 /*******************************
666  * Send multicast group rejoins
667  *******************************/
668 
669 static void team_mcast_rejoin_work(struct work_struct *work)
670 {
671         struct team *team;
672 
673         team = container_of(work, struct team, mcast_rejoin.dw.work);
674 
675         if (!rtnl_trylock()) {
676                 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
677                 return;
678         }
679         call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
680         rtnl_unlock();
681         if (!atomic_dec_and_test(&team->mcast_rejoin.count_pending))
682                 schedule_delayed_work(&team->mcast_rejoin.dw,
683                                       msecs_to_jiffies(team->mcast_rejoin.interval));
684 }
685 
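/*
 * Schedule a burst of NETDEV_RESEND_IGMP notifications so that multicast
 * group memberships are re-announced, repeated mcast_rejoin.count times
 * with mcast_rejoin.interval milliseconds between them.
 */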
686 static void team_mcast_rejoin(struct team *team)
687 {
688         if (!team->mcast_rejoin.count || !netif_running(team->dev))
689                 return;
690         atomic_set(&team->mcast_rejoin.count_pending, team->mcast_rejoin.count);
691         schedule_delayed_work(&team->mcast_rejoin.dw, 0);
692 }
693 
694 static void team_mcast_rejoin_init(struct team *team)
695 {
696         INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
697 }
698 
699 static void team_mcast_rejoin_fini(struct team *team)
700 {
701         cancel_delayed_work_sync(&team->mcast_rejoin.dw);
702 }
703 
704 
705 /************************
706  * Rx path frame handler
707  ************************/
708 
709 /* note: already called with rcu_read_lock */
710 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
711 {
712         struct sk_buff *skb = *pskb;
713         struct team_port *port;
714         struct team *team;
715         rx_handler_result_t res;
716 
717         skb = skb_share_check(skb, GFP_ATOMIC);
718         if (!skb)
719                 return RX_HANDLER_CONSUMED;
720 
721         *pskb = skb;
722 
723         port = team_port_get_rcu(skb->dev);
724         team = port->team;
725         if (!team_port_enabled(port)) {
726                 /* allow exact match delivery for disabled ports */
727                 res = RX_HANDLER_EXACT;
728         } else {
729                 res = team->ops.receive(team, port, skb);
730         }
731         if (res == RX_HANDLER_ANOTHER) {
732                 struct team_pcpu_stats *pcpu_stats;
733 
734                 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
735                 u64_stats_update_begin(&pcpu_stats->syncp);
736                 pcpu_stats->rx_packets++;
737                 pcpu_stats->rx_bytes += skb->len;
738                 if (skb->pkt_type == PACKET_MULTICAST)
739                         pcpu_stats->rx_multicast++;
740                 u64_stats_update_end(&pcpu_stats->syncp);
741 
742                 skb->dev = team->dev;
743         } else {
744                 this_cpu_inc(team->pcpu_stats->rx_dropped);
745         }
746 
747         return res;
748 }
749 
750 
751 /*************************************
752  * Multiqueue Tx port select override
753  *************************************/
754 
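/*
 * Allocate one list head per TX queue except queue 0; each list holds the
 * ports explicitly mapped to that queue, ordered by priority, and is
 * consulted by team_queue_override_transmit() before the mode's transmit op.
 */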
755 static int team_queue_override_init(struct team *team)
756 {
757         struct list_head *listarr;
758         unsigned int queue_cnt = team->dev->num_tx_queues - 1;
759         unsigned int i;
760 
761         if (!queue_cnt)
762                 return 0;
763         listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
764         if (!listarr)
765                 return -ENOMEM;
766         team->qom_lists = listarr;
767         for (i = 0; i < queue_cnt; i++)
768                 INIT_LIST_HEAD(listarr++);
769         return 0;
770 }
771 
772 static void team_queue_override_fini(struct team *team)
773 {
774         kfree(team->qom_lists);
775 }
776 
777 static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
778 {
779         return &team->qom_lists[queue_id - 1];
780 }
781 
782 /*
783  * note: already called with rcu_read_lock
784  */
785 static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
786 {
787         struct list_head *qom_list;
788         struct team_port *port;
789 
790         if (!team->queue_override_enabled || !skb->queue_mapping)
791                 return false;
792         qom_list = __team_get_qom_list(team, skb->queue_mapping);
793         list_for_each_entry_rcu(port, qom_list, qom_list) {
794                 if (!team_dev_queue_xmit(team, port, skb))
795                         return true;
796         }
797         return false;
798 }
799 
800 static void __team_queue_override_port_del(struct team *team,
801                                            struct team_port *port)
802 {
803         if (!port->queue_id)
804                 return;
805         list_del_rcu(&port->qom_list);
806 }
807 
808 static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
809                                                       struct team_port *cur)
810 {
811         if (port->priority < cur->priority)
812                 return true;
813         if (port->priority > cur->priority)
814                 return false;
815         if (port->index < cur->index)
816                 return true;
817         return false;
818 }
819 
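/*
 * Insert the port into its queue's override list, keeping the list sorted
 * in descending priority order as defined by
 * team_queue_override_port_has_gt_prio_than() (ties broken by port index).
 */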
820 static void __team_queue_override_port_add(struct team *team,
821                                            struct team_port *port)
822 {
823         struct team_port *cur;
824         struct list_head *qom_list;
825         struct list_head *node;
826 
827         if (!port->queue_id)
828                 return;
829         qom_list = __team_get_qom_list(team, port->queue_id);
830         node = qom_list;
831         list_for_each_entry(cur, qom_list, qom_list) {
832                 if (team_queue_override_port_has_gt_prio_than(port, cur))
833                         break;
834                 node = &cur->qom_list;
835         }
836         list_add_tail_rcu(&port->qom_list, node);
837 }
838 
839 static void __team_queue_override_enabled_check(struct team *team)
840 {
841         struct team_port *port;
842         bool enabled = false;
843 
844         list_for_each_entry(port, &team->port_list, list) {
845                 if (port->queue_id) {
846                         enabled = true;
847                         break;
848                 }
849         }
850         if (enabled == team->queue_override_enabled)
851                 return;
852         netdev_dbg(team->dev, "%s queue override\n",
853                    enabled ? "Enabling" : "Disabling");
854         team->queue_override_enabled = enabled;
855 }
856 
857 static void team_queue_override_port_prio_changed(struct team *team,
858                                                   struct team_port *port)
859 {
860         if (!port->queue_id || team_port_enabled(port))
861                 return;
862         __team_queue_override_port_del(team, port);
863         __team_queue_override_port_add(team, port);
864         __team_queue_override_enabled_check(team);
865 }
866 
867 static void team_queue_override_port_change_queue_id(struct team *team,
868                                                      struct team_port *port,
869                                                      u16 new_queue_id)
870 {
871         if (team_port_enabled(port)) {
872                 __team_queue_override_port_del(team, port);
873                 port->queue_id = new_queue_id;
874                 __team_queue_override_port_add(team, port);
875                 __team_queue_override_enabled_check(team);
876         } else {
877                 port->queue_id = new_queue_id;
878         }
879 }
880 
881 static void team_queue_override_port_add(struct team *team,
882                                          struct team_port *port)
883 {
884         __team_queue_override_port_add(team, port);
885         __team_queue_override_enabled_check(team);
886 }
887 
888 static void team_queue_override_port_del(struct team *team,
889                                          struct team_port *port)
890 {
891         __team_queue_override_port_del(team, port);
892         __team_queue_override_enabled_check(team);
893 }
894 
895 
896 /****************
897  * Port handling
898  ****************/
899 
900 static bool team_port_find(const struct team *team,
901                            const struct team_port *port)
902 {
903         struct team_port *cur;
904 
905         list_for_each_entry(cur, &team->port_list, list)
906                 if (cur == port)
907                         return true;
908         return false;
909 }
910 
 911 /*
 912  * Enable/disable a port by adding it to the enabled-port hashlist and setting
 913  * port->index (this might be racy, so a reader could see an incorrect index
 914  * while processing an in-flight packet, but that is not a problem). Writes
 915  * are guarded by team->lock.
 916  */
917 static void team_port_enable(struct team *team,
918                              struct team_port *port)
919 {
920         if (team_port_enabled(port))
921                 return;
922         port->index = team->en_port_count++;
923         hlist_add_head_rcu(&port->hlist,
924                            team_port_index_hash(team, port->index));
925         team_adjust_ops(team);
926         team_queue_override_port_add(team, port);
927         if (team->ops.port_enabled)
928                 team->ops.port_enabled(team, port);
929         team_notify_peers(team);
930         team_mcast_rejoin(team);
931 }
932 
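/*
 * After removing an enabled port, shift the indexes of all following enabled
 * ports down by one and re-hash them so the enabled-port index hash stays dense.
 */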
933 static void __reconstruct_port_hlist(struct team *team, int rm_index)
934 {
935         int i;
936         struct team_port *port;
937 
938         for (i = rm_index + 1; i < team->en_port_count; i++) {
939                 port = team_get_port_by_index(team, i);
940                 hlist_del_rcu(&port->hlist);
941                 port->index--;
942                 hlist_add_head_rcu(&port->hlist,
943                                    team_port_index_hash(team, port->index));
944         }
945 }
946 
947 static void team_port_disable(struct team *team,
948                               struct team_port *port)
949 {
950         if (!team_port_enabled(port))
951                 return;
952         if (team->ops.port_disabled)
953                 team->ops.port_disabled(team, port);
954         hlist_del_rcu(&port->hlist);
955         __reconstruct_port_hlist(team, port->index);
956         port->index = -1;
957         team->en_port_count--;
958         team_queue_override_port_del(team, port);
959         team_adjust_ops(team);
960         team_notify_peers(team);
961         team_mcast_rejoin(team);
962 }
963 
964 #define TEAM_VLAN_FEATURES (NETIF_F_ALL_CSUM | NETIF_F_SG | \
965                             NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
966                             NETIF_F_HIGHDMA | NETIF_F_LRO)
967 
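/*
 * Recompute the team device's vlan_features, hard_header_len and
 * IFF_XMIT_DST_RELEASE flag from the values of all ports, then let the core
 * re-evaluate the device features via netdev_change_features().
 */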
968 static void __team_compute_features(struct team *team)
969 {
970         struct team_port *port;
971         u32 vlan_features = TEAM_VLAN_FEATURES;
972         unsigned short max_hard_header_len = ETH_HLEN;
973         unsigned int flags, dst_release_flag = IFF_XMIT_DST_RELEASE;
974 
975         list_for_each_entry(port, &team->port_list, list) {
976                 vlan_features = netdev_increment_features(vlan_features,
977                                         port->dev->vlan_features,
978                                         TEAM_VLAN_FEATURES);
979 
980                 dst_release_flag &= port->dev->priv_flags;
981                 if (port->dev->hard_header_len > max_hard_header_len)
982                         max_hard_header_len = port->dev->hard_header_len;
983         }
984 
985         team->dev->vlan_features = vlan_features;
986         team->dev->hard_header_len = max_hard_header_len;
987 
988         flags = team->dev->priv_flags & ~IFF_XMIT_DST_RELEASE;
989         team->dev->priv_flags = flags | dst_release_flag;
990 
991         netdev_change_features(team->dev);
992 }
993 
994 static void team_compute_features(struct team *team)
995 {
996         mutex_lock(&team->lock);
997         __team_compute_features(team);
998         mutex_unlock(&team->lock);
999 }
1000 
1001 static int team_port_enter(struct team *team, struct team_port *port)
1002 {
1003         int err = 0;
1004 
1005         dev_hold(team->dev);
1006         port->dev->priv_flags |= IFF_TEAM_PORT;
1007         if (team->ops.port_enter) {
1008                 err = team->ops.port_enter(team, port);
1009                 if (err) {
1010                         netdev_err(team->dev, "Device %s failed to enter team mode\n",
1011                                    port->dev->name);
1012                         goto err_port_enter;
1013                 }
1014         }
1015 
1016         return 0;
1017 
1018 err_port_enter:
1019         port->dev->priv_flags &= ~IFF_TEAM_PORT;
1020         dev_put(team->dev);
1021 
1022         return err;
1023 }
1024 
1025 static void team_port_leave(struct team *team, struct team_port *port)
1026 {
1027         if (team->ops.port_leave)
1028                 team->ops.port_leave(team, port);
1029         port->dev->priv_flags &= ~IFF_TEAM_PORT;
1030         dev_put(team->dev);
1031 }
1032 
1033 #ifdef CONFIG_NET_POLL_CONTROLLER
1034 static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1035 {
1036         struct netpoll *np;
1037         int err;
1038 
1039         if (!team->dev->npinfo)
1040                 return 0;
1041 
1042         np = kzalloc(sizeof(*np), GFP_KERNEL);
1043         if (!np)
1044                 return -ENOMEM;
1045 
1046         err = __netpoll_setup(np, port->dev);
1047         if (err) {
1048                 kfree(np);
1049                 return err;
1050         }
1051         port->np = np;
1052         return err;
1053 }
1054 
1055 static void team_port_disable_netpoll(struct team_port *port)
1056 {
1057         struct netpoll *np = port->np;
1058 
1059         if (!np)
1060                 return;
1061         port->np = NULL;
1062 
1063         /* Wait for transmitting packets to finish before freeing. */
1064         synchronize_rcu_bh();
1065         __netpoll_cleanup(np);
1066         kfree(np);
1067 }
1068 #else
1069 static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1070 {
1071         return 0;
1072 }
1073 static void team_port_disable_netpoll(struct team_port *port)
1074 {
1075 }
1076 #endif
1077 
1078 static void __team_port_change_port_added(struct team_port *port, bool linkup);
1079 static int team_dev_type_check_change(struct net_device *dev,
1080                                       struct net_device *port_dev);
1081 
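/*
 * Add port_dev as a port of the team device: validate it, adjust its MTU,
 * open it, set up VLAN ids, netpoll and the rx_handler, link the team device
 * as its upper (master) device and finally enable it. Any failure unwinds
 * all of the steps already performed.
 */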
1082 static int team_port_add(struct team *team, struct net_device *port_dev)
1083 {
1084         struct net_device *dev = team->dev;
1085         struct team_port *port;
1086         char *portname = port_dev->name;
1087         int err;
1088 
1089         if (port_dev->flags & IFF_LOOPBACK) {
1090                 netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
1091                            portname);
1092                 return -EINVAL;
1093         }
1094 
1095         if (team_port_exists(port_dev)) {
1096                 netdev_err(dev, "Device %s is already a port "
1097                                 "of a team device\n", portname);
1098                 return -EBUSY;
1099         }
1100 
1101         if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
1102             vlan_uses_dev(dev)) {
1103                 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
1104                            portname);
1105                 return -EPERM;
1106         }
1107 
1108         err = team_dev_type_check_change(dev, port_dev);
1109         if (err)
1110                 return err;
1111 
1112         if (port_dev->flags & IFF_UP) {
1113                 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
1114                            portname);
1115                 return -EBUSY;
1116         }
1117 
1118         port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
1119                        GFP_KERNEL);
1120         if (!port)
1121                 return -ENOMEM;
1122 
1123         port->dev = port_dev;
1124         port->team = team;
1125         INIT_LIST_HEAD(&port->qom_list);
1126 
1127         port->orig.mtu = port_dev->mtu;
1128         err = dev_set_mtu(port_dev, dev->mtu);
1129         if (err) {
1130                 netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
1131                 goto err_set_mtu;
1132         }
1133 
1134         memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
1135 
1136         err = team_port_enter(team, port);
1137         if (err) {
1138                 netdev_err(dev, "Device %s failed to enter team mode\n",
1139                            portname);
1140                 goto err_port_enter;
1141         }
1142 
1143         err = dev_open(port_dev);
1144         if (err) {
1145                 netdev_dbg(dev, "Device %s opening failed\n",
1146                            portname);
1147                 goto err_dev_open;
1148         }
1149 
1150         err = vlan_vids_add_by_dev(port_dev, dev);
1151         if (err) {
1152                 netdev_err(dev, "Failed to add vlan ids to device %s\n",
1153                                 portname);
1154                 goto err_vids_add;
1155         }
1156 
1157         err = team_port_enable_netpoll(team, port);
1158         if (err) {
1159                 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1160                            portname);
1161                 goto err_enable_netpoll;
1162         }
1163 
1164         err = netdev_master_upper_dev_link(port_dev, dev);
1165         if (err) {
1166                 netdev_err(dev, "Device %s failed to set upper link\n",
1167                            portname);
1168                 goto err_set_upper_link;
1169         }
1170 
1171         err = netdev_rx_handler_register(port_dev, team_handle_frame,
1172                                          port);
1173         if (err) {
1174                 netdev_err(dev, "Device %s failed to register rx_handler\n",
1175                            portname);
1176                 goto err_handler_register;
1177         }
1178 
1179         err = __team_option_inst_add_port(team, port);
1180         if (err) {
1181                 netdev_err(dev, "Device %s failed to add per-port options\n",
1182                            portname);
1183                 goto err_option_port_add;
1184         }
1185 
1186         port->index = -1;
1187         list_add_tail_rcu(&port->list, &team->port_list);
1188         team_port_enable(team, port);
1189         __team_compute_features(team);
1190         __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
1191         __team_options_change_check(team);
1192 
1193         netdev_info(dev, "Port device %s added\n", portname);
1194 
1195         return 0;
1196 
1197 err_option_port_add:
1198         netdev_rx_handler_unregister(port_dev);
1199 
1200 err_handler_register:
1201         netdev_upper_dev_unlink(port_dev, dev);
1202 
1203 err_set_upper_link:
1204         team_port_disable_netpoll(port);
1205 
1206 err_enable_netpoll:
1207         vlan_vids_del_by_dev(port_dev, dev);
1208 
1209 err_vids_add:
1210         dev_close(port_dev);
1211 
1212 err_dev_open:
1213         team_port_leave(team, port);
1214         team_port_set_orig_dev_addr(port);
1215 
1216 err_port_enter:
1217         dev_set_mtu(port_dev, port->orig.mtu);
1218 
1219 err_set_mtu:
1220         kfree(port);
1221 
1222         return err;
1223 }
1224 
1225 static void __team_port_change_port_removed(struct team_port *port);
1226 
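/*
 * Remove port_dev from the team: disable it, tear down the rx_handler, upper
 * link, netpoll and VLAN ids, unsync unicast/multicast addresses, close the
 * device and restore its original MAC address and MTU.
 */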
1227 static int team_port_del(struct team *team, struct net_device *port_dev)
1228 {
1229         struct net_device *dev = team->dev;
1230         struct team_port *port;
1231         char *portname = port_dev->name;
1232 
1233         port = team_port_get_rtnl(port_dev);
1234         if (!port || !team_port_find(team, port)) {
1235                 netdev_err(dev, "Device %s does not act as a port of this team\n",
1236                            portname);
1237                 return -ENOENT;
1238         }
1239 
1240         team_port_disable(team, port);
1241         list_del_rcu(&port->list);
1242         netdev_rx_handler_unregister(port_dev);
1243         netdev_upper_dev_unlink(port_dev, dev);
1244         team_port_disable_netpoll(port);
1245         vlan_vids_del_by_dev(port_dev, dev);
1246         dev_uc_unsync(port_dev, dev);
1247         dev_mc_unsync(port_dev, dev);
1248         dev_close(port_dev);
1249         team_port_leave(team, port);
1250 
1251         __team_option_inst_mark_removed_port(team, port);
1252         __team_options_change_check(team);
1253         __team_option_inst_del_port(team, port);
1254         __team_port_change_port_removed(port);
1255 
1256         team_port_set_orig_dev_addr(port);
1257         dev_set_mtu(port_dev, port->orig.mtu);
1258         kfree_rcu(port, rcu);
1259         netdev_info(dev, "Port device %s removed\n", portname);
1260         __team_compute_features(team);
1261 
1262         return 0;
1263 }
1264 
1265 
1266 /*****************
1267  * Net device ops
1268  *****************/
1269 
1270 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
1271 {
1272         ctx->data.str_val = team->mode->kind;
1273         return 0;
1274 }
1275 
1276 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
1277 {
1278         return team_change_mode(team, ctx->data.str_val);
1279 }
1280 
1281 static int team_notify_peers_count_get(struct team *team,
1282                                        struct team_gsetter_ctx *ctx)
1283 {
1284         ctx->data.u32_val = team->notify_peers.count;
1285         return 0;
1286 }
1287 
1288 static int team_notify_peers_count_set(struct team *team,
1289                                        struct team_gsetter_ctx *ctx)
1290 {
1291         team->notify_peers.count = ctx->data.u32_val;
1292         return 0;
1293 }
1294 
1295 static int team_notify_peers_interval_get(struct team *team,
1296                                           struct team_gsetter_ctx *ctx)
1297 {
1298         ctx->data.u32_val = team->notify_peers.interval;
1299         return 0;
1300 }
1301 
1302 static int team_notify_peers_interval_set(struct team *team,
1303                                           struct team_gsetter_ctx *ctx)
1304 {
1305         team->notify_peers.interval = ctx->data.u32_val;
1306         return 0;
1307 }
1308 
1309 static int team_mcast_rejoin_count_get(struct team *team,
1310                                        struct team_gsetter_ctx *ctx)
1311 {
1312         ctx->data.u32_val = team->mcast_rejoin.count;
1313         return 0;
1314 }
1315 
1316 static int team_mcast_rejoin_count_set(struct team *team,
1317                                        struct team_gsetter_ctx *ctx)
1318 {
1319         team->mcast_rejoin.count = ctx->data.u32_val;
1320         return 0;
1321 }
1322 
1323 static int team_mcast_rejoin_interval_get(struct team *team,
1324                                           struct team_gsetter_ctx *ctx)
1325 {
1326         ctx->data.u32_val = team->mcast_rejoin.interval;
1327         return 0;
1328 }
1329 
1330 static int team_mcast_rejoin_interval_set(struct team *team,
1331                                           struct team_gsetter_ctx *ctx)
1332 {
1333         team->mcast_rejoin.interval = ctx->data.u32_val;
1334         return 0;
1335 }
1336 
1337 static int team_port_en_option_get(struct team *team,
1338                                    struct team_gsetter_ctx *ctx)
1339 {
1340         struct team_port *port = ctx->info->port;
1341 
1342         ctx->data.bool_val = team_port_enabled(port);
1343         return 0;
1344 }
1345 
1346 static int team_port_en_option_set(struct team *team,
1347                                    struct team_gsetter_ctx *ctx)
1348 {
1349         struct team_port *port = ctx->info->port;
1350 
1351         if (ctx->data.bool_val)
1352                 team_port_enable(team, port);
1353         else
1354                 team_port_disable(team, port);
1355         return 0;
1356 }
1357 
1358 static int team_user_linkup_option_get(struct team *team,
1359                                        struct team_gsetter_ctx *ctx)
1360 {
1361         struct team_port *port = ctx->info->port;
1362 
1363         ctx->data.bool_val = port->user.linkup;
1364         return 0;
1365 }
1366 
1367 static void __team_carrier_check(struct team *team);
1368 
1369 static int team_user_linkup_option_set(struct team *team,
1370                                        struct team_gsetter_ctx *ctx)
1371 {
1372         struct team_port *port = ctx->info->port;
1373 
1374         port->user.linkup = ctx->data.bool_val;
1375         team_refresh_port_linkup(port);
1376         __team_carrier_check(port->team);
1377         return 0;
1378 }
1379 
1380 static int team_user_linkup_en_option_get(struct team *team,
1381                                           struct team_gsetter_ctx *ctx)
1382 {
1383         struct team_port *port = ctx->info->port;
1384 
1385         ctx->data.bool_val = port->user.linkup_enabled;
1386         return 0;
1387 }
1388 
1389 static int team_user_linkup_en_option_set(struct team *team,
1390                                           struct team_gsetter_ctx *ctx)
1391 {
1392         struct team_port *port = ctx->info->port;
1393 
1394         port->user.linkup_enabled = ctx->data.bool_val;
1395         team_refresh_port_linkup(port);
1396         __team_carrier_check(port->team);
1397         return 0;
1398 }
1399 
1400 static int team_priority_option_get(struct team *team,
1401                                     struct team_gsetter_ctx *ctx)
1402 {
1403         struct team_port *port = ctx->info->port;
1404 
1405         ctx->data.s32_val = port->priority;
1406         return 0;
1407 }
1408 
1409 static int team_priority_option_set(struct team *team,
1410                                     struct team_gsetter_ctx *ctx)
1411 {
1412         struct team_port *port = ctx->info->port;
1413         s32 priority = ctx->data.s32_val;
1414 
1415         if (port->priority == priority)
1416                 return 0;
1417         port->priority = priority;
1418         team_queue_override_port_prio_changed(team, port);
1419         return 0;
1420 }
1421 
1422 static int team_queue_id_option_get(struct team *team,
1423                                     struct team_gsetter_ctx *ctx)
1424 {
1425         struct team_port *port = ctx->info->port;
1426 
1427         ctx->data.u32_val = port->queue_id;
1428         return 0;
1429 }
1430 
1431 static int team_queue_id_option_set(struct team *team,
1432                                     struct team_gsetter_ctx *ctx)
1433 {
1434         struct team_port *port = ctx->info->port;
1435         u16 new_queue_id = ctx->data.u32_val;
1436 
1437         if (port->queue_id == new_queue_id)
1438                 return 0;
1439         if (new_queue_id >= team->dev->real_num_tx_queues)
1440                 return -EINVAL;
1441         team_queue_override_port_change_queue_id(team, port, new_queue_id);
1442         return 0;
1443 }
1444 
1445 static const struct team_option team_options[] = {
1446         {
1447                 .name = "mode",
1448                 .type = TEAM_OPTION_TYPE_STRING,
1449                 .getter = team_mode_option_get,
1450                 .setter = team_mode_option_set,
1451         },
1452         {
1453                 .name = "notify_peers_count",
1454                 .type = TEAM_OPTION_TYPE_U32,
1455                 .getter = team_notify_peers_count_get,
1456                 .setter = team_notify_peers_count_set,
1457         },
1458         {
1459                 .name = "notify_peers_interval",
1460                 .type = TEAM_OPTION_TYPE_U32,
1461                 .getter = team_notify_peers_interval_get,
1462                 .setter = team_notify_peers_interval_set,
1463         },
1464         {
1465                 .name = "mcast_rejoin_count",
1466                 .type = TEAM_OPTION_TYPE_U32,
1467                 .getter = team_mcast_rejoin_count_get,
1468                 .setter = team_mcast_rejoin_count_set,
1469         },
1470         {
1471                 .name = "mcast_rejoin_interval",
1472                 .type = TEAM_OPTION_TYPE_U32,
1473                 .getter = team_mcast_rejoin_interval_get,
1474                 .setter = team_mcast_rejoin_interval_set,
1475         },
1476         {
1477                 .name = "enabled",
1478                 .type = TEAM_OPTION_TYPE_BOOL,
1479                 .per_port = true,
1480                 .getter = team_port_en_option_get,
1481                 .setter = team_port_en_option_set,
1482         },
1483         {
1484                 .name = "user_linkup",
1485                 .type = TEAM_OPTION_TYPE_BOOL,
1486                 .per_port = true,
1487                 .getter = team_user_linkup_option_get,
1488                 .setter = team_user_linkup_option_set,
1489         },
1490         {
1491                 .name = "user_linkup_enabled",
1492                 .type = TEAM_OPTION_TYPE_BOOL,
1493                 .per_port = true,
1494                 .getter = team_user_linkup_en_option_get,
1495                 .setter = team_user_linkup_en_option_set,
1496         },
1497         {
1498                 .name = "priority",
1499                 .type = TEAM_OPTION_TYPE_S32,
1500                 .per_port = true,
1501                 .getter = team_priority_option_get,
1502                 .setter = team_priority_option_set,
1503         },
1504         {
1505                 .name = "queue_id",
1506                 .type = TEAM_OPTION_TYPE_U32,
1507                 .per_port = true,
1508                 .getter = team_queue_id_option_get,
1509                 .setter = team_queue_id_option_set,
1510         },
1511 };
1512 
1513 static struct lock_class_key team_netdev_xmit_lock_key;
1514 static struct lock_class_key team_netdev_addr_lock_key;
1515 static struct lock_class_key team_tx_busylock_key;
1516 
1517 static void team_set_lockdep_class_one(struct net_device *dev,
1518                                        struct netdev_queue *txq,
1519                                        void *unused)
1520 {
1521         lockdep_set_class(&txq->_xmit_lock, &team_netdev_xmit_lock_key);
1522 }
1523 
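/*
 * Assign team-specific lockdep classes to the TX queue locks, the address
 * list lock and the qdisc busylock so that stacking a team device on top of
 * its ports does not produce false lockdep warnings about recursive locking.
 */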
1524 static void team_set_lockdep_class(struct net_device *dev)
1525 {
1526         lockdep_set_class(&dev->addr_list_lock, &team_netdev_addr_lock_key);
1527         netdev_for_each_tx_queue(dev, team_set_lockdep_class_one, NULL);
1528         dev->qdisc_tx_busylock = &team_tx_busylock_key;
1529 }
1530 
1531 static int team_init(struct net_device *dev)
1532 {
1533         struct team *team = netdev_priv(dev);
1534         int i;
1535         int err;
1536 
1537         team->dev = dev;
1538         mutex_init(&team->lock);
1539         team_set_no_mode(team);
1540 
1541         team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1542         if (!team->pcpu_stats)
1543                 return -ENOMEM;
1544 
1545         for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1546                 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1547         INIT_LIST_HEAD(&team->port_list);
1548         err = team_queue_override_init(team);
1549         if (err)
1550                 goto err_team_queue_override_init;
1551 
1552         team_adjust_ops(team);
1553 
1554         INIT_LIST_HEAD(&team->option_list);
1555         INIT_LIST_HEAD(&team->option_inst_list);
1556 
1557         team_notify_peers_init(team);
1558         team_mcast_rejoin_init(team);
1559 
1560         err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1561         if (err)
1562                 goto err_options_register;
1563         netif_carrier_off(dev);
1564 
1565         team_set_lockdep_class(dev);
1566 
1567         return 0;
1568 
1569 err_options_register:
1570         team_mcast_rejoin_fini(team);
1571         team_notify_peers_fini(team);
1572         team_queue_override_fini(team);
1573 err_team_queue_override_init:
1574         free_percpu(team->pcpu_stats);
1575 
1576         return err;
1577 }
1578 
1579 static void team_uninit(struct net_device *dev)
1580 {
1581         struct team *team = netdev_priv(dev);
1582         struct team_port *port;
1583         struct team_port *tmp;
1584 
1585         mutex_lock(&team->lock);
1586         list_for_each_entry_safe(port, tmp, &team->port_list, list)
1587                 team_port_del(team, port->dev);
1588 
1589         __team_change_mode(team, NULL); /* cleanup */
1590         __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1591         team_mcast_rejoin_fini(team);
1592         team_notify_peers_fini(team);
1593         team_queue_override_fini(team);
1594         mutex_unlock(&team->lock);
1595 }
1596 
1597 static void team_destructor(struct net_device *dev)
1598 {
1599         struct team *team = netdev_priv(dev);
1600 
1601         free_percpu(team->pcpu_stats);
1602         free_netdev(dev);
1603 }
1604 
1605 static int team_open(struct net_device *dev)
1606 {
1607         return 0;
1608 }
1609 
1610 static int team_close(struct net_device *dev)
1611 {
1612         return 0;
1613 }
1614 
1615 /*
1616  * note: already called with rcu_read_lock
1617  */
1618 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1619 {
1620         struct team *team = netdev_priv(dev);
1621         bool tx_success;
1622         unsigned int len = skb->len;
1623 
1624         tx_success = team_queue_override_transmit(team, skb);
1625         if (!tx_success)
1626                 tx_success = team->ops.transmit(team, skb);
1627         if (tx_success) {
1628                 struct team_pcpu_stats *pcpu_stats;
1629 
1630                 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1631                 u64_stats_update_begin(&pcpu_stats->syncp);
1632                 pcpu_stats->tx_packets++;
1633                 pcpu_stats->tx_bytes += len;
1634                 u64_stats_update_end(&pcpu_stats->syncp);
1635         } else {
1636                 this_cpu_inc(team->pcpu_stats->tx_dropped);
1637         }
1638 
1639         return NETDEV_TX_OK;
1640 }
1641 
1642 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1643                              void *accel_priv, select_queue_fallback_t fallback)
1644 {
1645         /*
1646          * This helper function exists to help dev_pick_tx get the correct
1647          * destination queue.  Using a helper function skips a call to
1648          * skb_tx_hash and will put the skbs in the queue we expect on their
1649          * way down to the team driver.
1650          */
1651         u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1652 
1653         /*
1654          * Save the original txq to restore before passing to the driver
1655          */
1656         qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1657 
1658         if (unlikely(txq >= dev->real_num_tx_queues)) {
1659                 do {
1660                         txq -= dev->real_num_tx_queues;
1661                 } while (txq >= dev->real_num_tx_queues);
1662         }
1663         return txq;
1664 }
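The do/while above simply folds an out-of-range recorded rx queue index back into [0, real_num_tx_queues). A minimal standalone sketch of the same wrap in userspace C (wrap_txq and the sample values are illustrative, not part of the driver):

        #include <stdio.h>

        /* Same effect as the loop in team_select_queue: repeatedly subtract
         * the queue count until the index is in range, i.e. txq % nqueues. */
        static unsigned int wrap_txq(unsigned int txq, unsigned int nqueues)
        {
                while (txq >= nqueues)
                        txq -= nqueues;
                return txq;
        }

        int main(void)
        {
                printf("%u\n", wrap_txq(10, 4));        /* prints 2 */
                return 0;
        }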
1665 
1666 static void team_change_rx_flags(struct net_device *dev, int change)
1667 {
1668         struct team *team = netdev_priv(dev);
1669         struct team_port *port;
1670         int inc;
1671 
1672         rcu_read_lock();
1673         list_for_each_entry_rcu(port, &team->port_list, list) {
1674                 if (change & IFF_PROMISC) {
1675                         inc = dev->flags & IFF_PROMISC ? 1 : -1;
1676                         dev_set_promiscuity(port->dev, inc);
1677                 }
1678                 if (change & IFF_ALLMULTI) {
1679                         inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1680                         dev_set_allmulti(port->dev, inc);
1681                 }
1682         }
1683         rcu_read_unlock();
1684 }
1685 
1686 static void team_set_rx_mode(struct net_device *dev)
1687 {
1688         struct team *team = netdev_priv(dev);
1689         struct team_port *port;
1690 
1691         rcu_read_lock();
1692         list_for_each_entry_rcu(port, &team->port_list, list) {
1693                 dev_uc_sync_multiple(port->dev, dev);
1694                 dev_mc_sync_multiple(port->dev, dev);
1695         }
1696         rcu_read_unlock();
1697 }
1698 
1699 static int team_set_mac_address(struct net_device *dev, void *p)
1700 {
1701         struct sockaddr *addr = p;
1702         struct team *team = netdev_priv(dev);
1703         struct team_port *port;
1704 
1705         if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1706                 return -EADDRNOTAVAIL;
1707         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1708         rcu_read_lock();
1709         list_for_each_entry_rcu(port, &team->port_list, list)
1710                 if (team->ops.port_change_dev_addr)
1711                         team->ops.port_change_dev_addr(team, port);
1712         rcu_read_unlock();
1713         return 0;
1714 }
1715 
1716 static int team_change_mtu(struct net_device *dev, int new_mtu)
1717 {
1718         struct team *team = netdev_priv(dev);
1719         struct team_port *port;
1720         int err;
1721 
1722         /*
1723          * Although this is a reader, it's guarded by the team lock. It's not
1724          * possible to traverse the list in reverse under rcu_read_lock.
1725          */
1726         mutex_lock(&team->lock);
1727         team->port_mtu_change_allowed = true;
1728         list_for_each_entry(port, &team->port_list, list) {
1729                 err = dev_set_mtu(port->dev, new_mtu);
1730                 if (err) {
1731                         netdev_err(dev, "Device %s failed to change mtu\n",
1732                                    port->dev->name);
1733                         goto unwind;
1734                 }
1735         }
1736         team->port_mtu_change_allowed = false;
1737         mutex_unlock(&team->lock);
1738 
1739         dev->mtu = new_mtu;
1740 
1741         return 0;
1742 
1743 unwind:
1744         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1745                 dev_set_mtu(port->dev, dev->mtu);
1746         team->port_mtu_change_allowed = false;
1747         mutex_unlock(&team->lock);
1748 
1749         return err;
1750 }
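team_change_mtu() above (and team_vlan_rx_add_vid() below) share the same apply-then-unwind shape: walk the port list forward and, on the first failure, walk back over the ports already changed with list_for_each_entry_continue_reverse() to restore the previous value. A self-contained sketch of that shape over a plain array (set_one(), apply_all() and the failing index are assumptions made for illustration):

        #include <stdio.h>

        /* Hypothetical per-port setter; fails for index 2 so the unwind
         * path below actually runs. */
        static int set_one(int idx, int *slot, int val)
        {
                if (idx == 2)
                        return -1;
                *slot = val;
                return 0;
        }

        /* Apply new_val to every slot; on failure restore old_val on the
         * slots already changed, mirroring the unwind: label above. */
        static int apply_all(int *vals, int n, int new_val, int old_val)
        {
                int i;

                for (i = 0; i < n; i++) {
                        if (set_one(i, &vals[i], new_val))
                                goto unwind;
                }
                return 0;

        unwind:
                while (--i >= 0)
                        set_one(i, &vals[i], old_val);
                return -1;
        }

        int main(void)
        {
                int mtus[4] = { 1500, 1500, 1500, 1500 };
                int err = apply_all(mtus, 4, 9000, 1500);

                printf("err=%d mtu[0]=%d\n", err, mtus[0]);     /* err=-1 mtu[0]=1500 */
                return 0;
        }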
1751 
1752 static struct rtnl_link_stats64 *
1753 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1754 {
1755         struct team *team = netdev_priv(dev);
1756         struct team_pcpu_stats *p;
1757         u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1758         u32 rx_dropped = 0, tx_dropped = 0;
1759         unsigned int start;
1760         int i;
1761 
1762         for_each_possible_cpu(i) {
1763                 p = per_cpu_ptr(team->pcpu_stats, i);
1764                 do {
1765                         start = u64_stats_fetch_begin_irq(&p->syncp);
1766                         rx_packets      = p->rx_packets;
1767                         rx_bytes        = p->rx_bytes;
1768                         rx_multicast    = p->rx_multicast;
1769                         tx_packets      = p->tx_packets;
1770                         tx_bytes        = p->tx_bytes;
1771                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1772 
1773                 stats->rx_packets       += rx_packets;
1774                 stats->rx_bytes         += rx_bytes;
1775                 stats->multicast        += rx_multicast;
1776                 stats->tx_packets       += tx_packets;
1777                 stats->tx_bytes         += tx_bytes;
1778                 /*
1779                  * rx_dropped & tx_dropped are u32, updated
1780                  * without syncp protection.
1781                  */
1782                 rx_dropped      += p->rx_dropped;
1783                 tx_dropped      += p->tx_dropped;
1784         }
1785         stats->rx_dropped       = rx_dropped;
1786         stats->tx_dropped       = tx_dropped;
1787         return stats;
1788 }
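team_get_stats64() above sums one struct team_pcpu_stats per possible CPU into a single rtnl_link_stats64 view, wrapping each per-CPU read in u64_stats_fetch_begin_irq()/..._retry_irq() so 64-bit counters are not read torn on 32-bit hosts. A simplified userspace sketch of the aggregation step only (struct and values are illustrative; the seqcount retry is deliberately omitted here):

        #include <stdio.h>

        struct pcpu_stats {
                unsigned long long tx_packets;
                unsigned long long tx_bytes;
        };

        /* Sum per-CPU counters into one total.  The kernel code additionally
         * retries each read under a sequence counter; this sketch does not. */
        static void sum_stats(const struct pcpu_stats *per_cpu, int ncpus,
                              struct pcpu_stats *total)
        {
                int i;

                total->tx_packets = 0;
                total->tx_bytes = 0;
                for (i = 0; i < ncpus; i++) {
                        total->tx_packets += per_cpu[i].tx_packets;
                        total->tx_bytes += per_cpu[i].tx_bytes;
                }
        }

        int main(void)
        {
                struct pcpu_stats cpus[2] = { { 10, 1500 }, { 5, 700 } };
                struct pcpu_stats total;

                sum_stats(cpus, 2, &total);
                printf("%llu packets, %llu bytes\n", total.tx_packets, total.tx_bytes);
                return 0;
        }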
1789 
1790 static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1791 {
1792         struct team *team = netdev_priv(dev);
1793         struct team_port *port;
1794         int err;
1795 
1796         /*
1797          * Although this is a reader, it's guarded by the team lock. It's not
1798          * possible to traverse the list in reverse under rcu_read_lock.
1799          */
1800         mutex_lock(&team->lock);
1801         list_for_each_entry(port, &team->port_list, list) {
1802                 err = vlan_vid_add(port->dev, proto, vid);
1803                 if (err)
1804                         goto unwind;
1805         }
1806         mutex_unlock(&team->lock);
1807 
1808         return 0;
1809 
1810 unwind:
1811         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1812                 vlan_vid_del(port->dev, proto, vid);
1813         mutex_unlock(&team->lock);
1814 
1815         return err;
1816 }
1817 
1818 static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1819 {
1820         struct team *team = netdev_priv(dev);
1821         struct team_port *port;
1822 
1823         rcu_read_lock();
1824         list_for_each_entry_rcu(port, &team->port_list, list)
1825                 vlan_vid_del(port->dev, proto, vid);
1826         rcu_read_unlock();
1827 
1828         return 0;
1829 }
1830 
1831 #ifdef CONFIG_NET_POLL_CONTROLLER
1832 static void team_poll_controller(struct net_device *dev)
1833 {
1834 }
1835 
1836 static void __team_netpoll_cleanup(struct team *team)
1837 {
1838         struct team_port *port;
1839 
1840         list_for_each_entry(port, &team->port_list, list)
1841                 team_port_disable_netpoll(port);
1842 }
1843 
1844 static void team_netpoll_cleanup(struct net_device *dev)
1845 {
1846         struct team *team = netdev_priv(dev);
1847 
1848         mutex_lock(&team->lock);
1849         __team_netpoll_cleanup(team);
1850         mutex_unlock(&team->lock);
1851 }
1852 
1853 static int team_netpoll_setup(struct net_device *dev,
1854                               struct netpoll_info *npinfo)
1855 {
1856         struct team *team = netdev_priv(dev);
1857         struct team_port *port;
1858         int err = 0;
1859 
1860         mutex_lock(&team->lock);
1861         list_for_each_entry(port, &team->port_list, list) {
1862                 err = team_port_enable_netpoll(team, port);
1863                 if (err) {
1864                         __team_netpoll_cleanup(team);
1865                         break;
1866                 }
1867         }
1868         mutex_unlock(&team->lock);
1869         return err;
1870 }
1871 #endif
1872 
1873 static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1874 {
1875         struct team *team = netdev_priv(dev);
1876         int err;
1877 
1878         mutex_lock(&team->lock);
1879         err = team_port_add(team, port_dev);
1880         mutex_unlock(&team->lock);
1881         return err;
1882 }
1883 
1884 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1885 {
1886         struct team *team = netdev_priv(dev);
1887         int err;
1888 
1889         mutex_lock(&team->lock);
1890         err = team_port_del(team, port_dev);
1891         mutex_unlock(&team->lock);
1892         return err;
1893 }
1894 
1895 static netdev_features_t team_fix_features(struct net_device *dev,
1896                                            netdev_features_t features)
1897 {
1898         struct team_port *port;
1899         struct team *team = netdev_priv(dev);
1900         netdev_features_t mask;
1901 
1902         mask = features;
1903         features &= ~NETIF_F_ONE_FOR_ALL;
1904         features |= NETIF_F_ALL_FOR_ALL;
1905 
1906         rcu_read_lock();
1907         list_for_each_entry_rcu(port, &team->port_list, list) {
1908                 features = netdev_increment_features(features,
1909                                                      port->dev->features,
1910                                                      mask);
1911         }
1912         rcu_read_unlock();
1913         return features;
1914 }
1915 
1916 static int team_change_carrier(struct net_device *dev, bool new_carrier)
1917 {
1918         struct team *team = netdev_priv(dev);
1919 
1920         team->user_carrier_enabled = true;
1921 
1922         if (new_carrier)
1923                 netif_carrier_on(dev);
1924         else
1925                 netif_carrier_off(dev);
1926         return 0;
1927 }
1928 
1929 static const struct net_device_ops team_netdev_ops = {
1930         .ndo_init               = team_init,
1931         .ndo_uninit             = team_uninit,
1932         .ndo_open               = team_open,
1933         .ndo_stop               = team_close,
1934         .ndo_start_xmit         = team_xmit,
1935         .ndo_select_queue       = team_select_queue,
1936         .ndo_change_rx_flags    = team_change_rx_flags,
1937         .ndo_set_rx_mode        = team_set_rx_mode,
1938         .ndo_set_mac_address    = team_set_mac_address,
1939         .ndo_change_mtu         = team_change_mtu,
1940         .ndo_get_stats64        = team_get_stats64,
1941         .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
1942         .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
1943 #ifdef CONFIG_NET_POLL_CONTROLLER
1944         .ndo_poll_controller    = team_poll_controller,
1945         .ndo_netpoll_setup      = team_netpoll_setup,
1946         .ndo_netpoll_cleanup    = team_netpoll_cleanup,
1947 #endif
1948         .ndo_add_slave          = team_add_slave,
1949         .ndo_del_slave          = team_del_slave,
1950         .ndo_fix_features       = team_fix_features,
1951         .ndo_change_carrier     = team_change_carrier,
1952 };
1953 
1954 /***********************
1955  * ethtool interface
1956  ***********************/
1957 
1958 static void team_ethtool_get_drvinfo(struct net_device *dev,
1959                                      struct ethtool_drvinfo *drvinfo)
1960 {
1961         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
1962         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
1963 }
1964 
1965 static const struct ethtool_ops team_ethtool_ops = {
1966         .get_drvinfo            = team_ethtool_get_drvinfo,
1967         .get_link               = ethtool_op_get_link,
1968 };
1969 
1970 /***********************
1971  * rt netlink interface
1972  ***********************/
1973 
1974 static void team_setup_by_port(struct net_device *dev,
1975                                struct net_device *port_dev)
1976 {
1977         dev->header_ops = port_dev->header_ops;
1978         dev->type = port_dev->type;
1979         dev->hard_header_len = port_dev->hard_header_len;
1980         dev->addr_len = port_dev->addr_len;
1981         dev->mtu = port_dev->mtu;
1982         memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
1983         eth_hw_addr_inherit(dev, port_dev);
1984 }
1985 
1986 static int team_dev_type_check_change(struct net_device *dev,
1987                                       struct net_device *port_dev)
1988 {
1989         struct team *team = netdev_priv(dev);
1990         char *portname = port_dev->name;
1991         int err;
1992 
1993         if (dev->type == port_dev->type)
1994                 return 0;
1995         if (!list_empty(&team->port_list)) {
1996                 netdev_err(dev, "Device %s is of different type\n", portname);
1997                 return -EBUSY;
1998         }
1999         err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2000         err = notifier_to_errno(err);
2001         if (err) {
2002                 netdev_err(dev, "Refused to change device type\n");
2003                 return err;
2004         }
2005         dev_uc_flush(dev);
2006         dev_mc_flush(dev);
2007         team_setup_by_port(dev, port_dev);
2008         call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2009         return 0;
2010 }
2011 
2012 static void team_setup(struct net_device *dev)
2013 {
2014         ether_setup(dev);
2015 
2016         dev->netdev_ops = &team_netdev_ops;
2017         dev->ethtool_ops = &team_ethtool_ops;
2018         dev->destructor = team_destructor;
2019         dev->tx_queue_len = 0;
2020         dev->flags |= IFF_MULTICAST;
2021         dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2022 
2023         /*
2024          * Indicate we support unicast address filtering. That way core won't
2025          * bring us to promisc mode in case a unicast addr is added.
2026          * Leave this up to the underlying drivers.
2027          */
2028         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2029 
2030         dev->features |= NETIF_F_LLTX;
2031         dev->features |= NETIF_F_GRO;
2032 
2033         /* Don't allow team devices to change network namespaces. */
2034         dev->features |= NETIF_F_NETNS_LOCAL;
2035 
2036         dev->hw_features = TEAM_VLAN_FEATURES |
2037                            NETIF_F_HW_VLAN_CTAG_TX |
2038                            NETIF_F_HW_VLAN_CTAG_RX |
2039                            NETIF_F_HW_VLAN_CTAG_FILTER;
2040 
2041         dev->hw_features &= ~(NETIF_F_ALL_CSUM & ~NETIF_F_HW_CSUM);
2042         dev->features |= dev->hw_features;
2043 }
2044 
2045 static int team_newlink(struct net *src_net, struct net_device *dev,
2046                         struct nlattr *tb[], struct nlattr *data[])
2047 {
2048         int err;
2049 
2050         if (tb[IFLA_ADDRESS] == NULL)
2051                 eth_hw_addr_random(dev);
2052 
2053         err = register_netdevice(dev);
2054         if (err)
2055                 return err;
2056 
2057         return 0;
2058 }
2059 
2060 static int team_validate(struct nlattr *tb[], struct nlattr *data[])
2061 {
2062         if (tb[IFLA_ADDRESS]) {
2063                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2064                         return -EINVAL;
2065                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2066                         return -EADDRNOTAVAIL;
2067         }
2068         return 0;
2069 }
2070 
2071 static unsigned int team_get_num_tx_queues(void)
2072 {
2073         return TEAM_DEFAULT_NUM_TX_QUEUES;
2074 }
2075 
2076 static unsigned int team_get_num_rx_queues(void)
2077 {
2078         return TEAM_DEFAULT_NUM_RX_QUEUES;
2079 }
2080 
2081 static struct rtnl_link_ops team_link_ops __read_mostly = {
2082         .kind                   = DRV_NAME,
2083         .priv_size              = sizeof(struct team),
2084         .setup                  = team_setup,
2085         .newlink                = team_newlink,
2086         .validate               = team_validate,
2087         .get_num_tx_queues      = team_get_num_tx_queues,
2088         .get_num_rx_queues      = team_get_num_rx_queues,
2089 };
2090 
2091 
2092 /***********************************
2093  * Generic netlink custom interface
2094  ***********************************/
2095 
2096 static struct genl_family team_nl_family = {
2097         .id             = GENL_ID_GENERATE,
2098         .name           = TEAM_GENL_NAME,
2099         .version        = TEAM_GENL_VERSION,
2100         .maxattr        = TEAM_ATTR_MAX,
2101         .netnsok        = true,
2102 };
2103 
2104 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2105         [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2106         [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2107         [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2108         [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2109 };
2110 
2111 static const struct nla_policy
2112 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2113         [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2114         [TEAM_ATTR_OPTION_NAME] = {
2115                 .type = NLA_STRING,
2116                 .len = TEAM_STRING_MAX_LEN,
2117         },
2118         [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2119         [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2120         [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2121 };
2122 
2123 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2124 {
2125         struct sk_buff *msg;
2126         void *hdr;
2127         int err;
2128 
2129         msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2130         if (!msg)
2131                 return -ENOMEM;
2132 
2133         hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2134                           &team_nl_family, 0, TEAM_CMD_NOOP);
2135         if (!hdr) {
2136                 err = -EMSGSIZE;
2137                 goto err_msg_put;
2138         }
2139 
2140         genlmsg_end(msg, hdr);
2141 
2142         return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2143 
2144 err_msg_put:
2145         nlmsg_free(msg);
2146 
2147         return err;
2148 }
2149 
2150 /*
2151  * Netlink cmd functions should be locked by the following two functions.
2152  * Since dev gets held here, that ensures it won't disappear in between.
2153  */
2154 static struct team *team_nl_team_get(struct genl_info *info)
2155 {
2156         struct net *net = genl_info_net(info);
2157         int ifindex;
2158         struct net_device *dev;
2159         struct team *team;
2160 
2161         if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2162                 return NULL;
2163 
2164         ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2165         dev = dev_get_by_index(net, ifindex);
2166         if (!dev || dev->netdev_ops != &team_netdev_ops) {
2167                 if (dev)
2168                         dev_put(dev);
2169                 return NULL;
2170         }
2171 
2172         team = netdev_priv(dev);
2173         mutex_lock(&team->lock);
2174         return team;
2175 }
2176 
2177 static void team_nl_team_put(struct team *team)
2178 {
2179         mutex_unlock(&team->lock);
2180         dev_put(team->dev);
2181 }
2182 
2183 typedef int team_nl_send_func_t(struct sk_buff *skb,
2184                                 struct team *team, u32 portid);
2185 
2186 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2187 {
2188         return genlmsg_unicast(dev_net(team->dev), skb, portid);
2189 }
2190 
2191 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2192                                        struct team_option_inst *opt_inst)
2193 {
2194         struct nlattr *option_item;
2195         struct team_option *option = opt_inst->option;
2196         struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2197         struct team_gsetter_ctx ctx;
2198         int err;
2199 
2200         ctx.info = opt_inst_info;
2201         err = team_option_get(team, opt_inst, &ctx);
2202         if (err)
2203                 return err;
2204 
2205         option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
2206         if (!option_item)
2207                 return -EMSGSIZE;
2208 
2209         if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2210                 goto nest_cancel;
2211         if (opt_inst_info->port &&
2212             nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2213                         opt_inst_info->port->dev->ifindex))
2214                 goto nest_cancel;
2215         if (opt_inst->option->array_size &&
2216             nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2217                         opt_inst_info->array_index))
2218                 goto nest_cancel;
2219 
2220         switch (option->type) {
2221         case TEAM_OPTION_TYPE_U32:
2222                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2223                         goto nest_cancel;
2224                 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2225                         goto nest_cancel;
2226                 break;
2227         case TEAM_OPTION_TYPE_STRING:
2228                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2229                         goto nest_cancel;
2230                 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2231                                    ctx.data.str_val))
2232                         goto nest_cancel;
2233                 break;
2234         case TEAM_OPTION_TYPE_BINARY:
2235                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2236                         goto nest_cancel;
2237                 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2238                             ctx.data.bin_val.ptr))
2239                         goto nest_cancel;
2240                 break;
2241         case TEAM_OPTION_TYPE_BOOL:
2242                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2243                         goto nest_cancel;
2244                 if (ctx.data.bool_val &&
2245                     nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2246                         goto nest_cancel;
2247                 break;
2248         case TEAM_OPTION_TYPE_S32:
2249                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2250                         goto nest_cancel;
2251                 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2252                         goto nest_cancel;
2253                 break;
2254         default:
2255                 BUG();
2256         }
2257         if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2258                 goto nest_cancel;
2259         if (opt_inst->changed) {
2260                 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2261                         goto nest_cancel;
2262                 opt_inst->changed = false;
2263         }
2264         nla_nest_end(skb, option_item);
2265         return 0;
2266 
2267 nest_cancel:
2268         nla_nest_cancel(skb, option_item);
2269         return -EMSGSIZE;
2270 }
2271 
2272 static int __send_and_alloc_skb(struct sk_buff **pskb,
2273                                 struct team *team, u32 portid,
2274                                 team_nl_send_func_t *send_func)
2275 {
2276         int err;
2277 
2278         if (*pskb) {
2279                 err = send_func(*pskb, team, portid);
2280                 if (err)
2281                         return err;
2282         }
2283         *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2284         if (!*pskb)
2285                 return -ENOMEM;
2286         return 0;
2287 }
2288 
2289 static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2290                                     int flags, team_nl_send_func_t *send_func,
2291                                     struct list_head *sel_opt_inst_list)
2292 {
2293         struct nlattr *option_list;
2294         struct nlmsghdr *nlh;
2295         void *hdr;
2296         struct team_option_inst *opt_inst;
2297         int err;
2298         struct sk_buff *skb = NULL;
2299         bool incomplete;
2300         int i;
2301 
2302         opt_inst = list_first_entry(sel_opt_inst_list,
2303                                     struct team_option_inst, tmp_list);
2304 
2305 start_again:
2306         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2307         if (err)
2308                 return err;
2309 
2310         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2311                           TEAM_CMD_OPTIONS_GET);
2312         if (!hdr)
2313                 return -EMSGSIZE;
2314 
2315         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2316                 goto nla_put_failure;
2317         option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
2318         if (!option_list)
2319                 goto nla_put_failure;
2320 
2321         i = 0;
2322         incomplete = false;
2323         list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2324                 err = team_nl_fill_one_option_get(skb, team, opt_inst);
2325                 if (err) {
2326                         if (err == -EMSGSIZE) {
2327                                 if (!i)
2328                                         goto errout;
2329                                 incomplete = true;
2330                                 break;
2331                         }
2332                         goto errout;
2333                 }
2334                 i++;
2335         }
2336 
2337         nla_nest_end(skb, option_list);
2338         genlmsg_end(skb, hdr);
2339         if (incomplete)
2340                 goto start_again;
2341 
2342 send_done:
2343         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2344         if (!nlh) {
2345                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2346                 if (err)
2347                         goto errout;
2348                 goto send_done;
2349         }
2350 
2351         return send_func(skb, team, portid);
2352 
2353 nla_put_failure:
2354         err = -EMSGSIZE;
2355 errout:
2356         genlmsg_cancel(skb, hdr);
2357         nlmsg_free(skb);
2358         return err;
2359 }
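team_nl_send_options_get() above packs option instances into an NLM_F_MULTI message until one no longer fits (-EMSGSIZE), flushes the message via __send_and_alloc_skb(), and restarts from where it left off; the "!i" check bails out if even a single option cannot fit. A rough userspace sketch of that packing loop with a plain byte buffer (put_item(), MSG_SIZE and the sample items are assumptions for illustration):

        #include <stdio.h>
        #include <string.h>

        #define MSG_SIZE 16     /* stand-in for the netlink message size limit */

        /* Returns 0 on success, -1 when the item does not fit (like -EMSGSIZE). */
        static int put_item(char *buf, size_t *used, const char *item)
        {
                size_t len = strlen(item);

                if (*used + len > MSG_SIZE)
                        return -1;
                memcpy(buf + *used, item, len);
                *used += len;
                return 0;
        }

        int main(void)
        {
                const char *items[] = { "opt0;", "opt1;", "opt2;", "opt3;", "opt4;" };
                size_t i = 0, n = sizeof(items) / sizeof(items[0]);

                while (i < n) {
                        char msg[MSG_SIZE + 1];
                        size_t used = 0, before = i;

                        /* Pack as many items as fit, then flush and start a
                         * new message, like the start_again/incomplete loop. */
                        while (i < n && put_item(msg, &used, items[i]) == 0)
                                i++;
                        if (i == before)        /* one item too big: give up (the "!i" case) */
                                return 1;
                        msg[used] = '\0';
                        printf("message: %s\n", msg);
                }
                return 0;
        }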
2360 
2361 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2362 {
2363         struct team *team;
2364         struct team_option_inst *opt_inst;
2365         int err;
2366         LIST_HEAD(sel_opt_inst_list);
2367 
2368         team = team_nl_team_get(info);
2369         if (!team)
2370                 return -EINVAL;
2371 
2372         list_for_each_entry(opt_inst, &team->option_inst_list, list)
2373                 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2374         err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2375                                        NLM_F_ACK, team_nl_send_unicast,
2376                                        &sel_opt_inst_list);
2377 
2378         team_nl_team_put(team);
2379 
2380         return err;
2381 }
2382 
2383 static int team_nl_send_event_options_get(struct team *team,
2384                                           struct list_head *sel_opt_inst_list);
2385 
2386 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2387 {
2388         struct team *team;
2389         int err = 0;
2390         int i;
2391         struct nlattr *nl_option;
2392         LIST_HEAD(opt_inst_list);
2393 
2394         team = team_nl_team_get(info);
2395         if (!team)
2396                 return -EINVAL;
2397 
2398         err = -EINVAL;
2399         if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2400                 err = -EINVAL;
2401                 goto team_put;
2402         }
2403 
2404         nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2405                 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2406                 struct nlattr *attr;
2407                 struct nlattr *attr_data;
2408                 enum team_option_type opt_type;
2409                 int opt_port_ifindex = 0; /* != 0 for per-port options */
2410                 u32 opt_array_index = 0;
2411                 bool opt_is_array = false;
2412                 struct team_option_inst *opt_inst;
2413                 char *opt_name;
2414                 bool opt_found = false;
2415 
2416                 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2417                         err = -EINVAL;
2418                         goto team_put;
2419                 }
2420                 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
2421                                        nl_option, team_nl_option_policy);
2422                 if (err)
2423                         goto team_put;
2424                 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2425                     !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2426                         err = -EINVAL;
2427                         goto team_put;
2428                 }
2429                 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2430                 case NLA_U32:
2431                         opt_type = TEAM_OPTION_TYPE_U32;
2432                         break;
2433                 case NLA_STRING:
2434                         opt_type = TEAM_OPTION_TYPE_STRING;
2435                         break;
2436                 case NLA_BINARY:
2437                         opt_type = TEAM_OPTION_TYPE_BINARY;
2438                         break;
2439                 case NLA_FLAG:
2440                         opt_type = TEAM_OPTION_TYPE_BOOL;
2441                         break;
2442                 case NLA_S32:
2443                         opt_type = TEAM_OPTION_TYPE_S32;
2444                         break;
2445                 default:
2446                         goto team_put;
2447                 }
2448 
2449                 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2450                 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2451                         err = -EINVAL;
2452                         goto team_put;
2453                 }
2454 
2455                 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2456                 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2457                 if (attr)
2458                         opt_port_ifindex = nla_get_u32(attr);
2459 
2460                 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2461                 if (attr) {
2462                         opt_is_array = true;
2463                         opt_array_index = nla_get_u32(attr);
2464                 }
2465 
2466                 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2467                         struct team_option *option = opt_inst->option;
2468                         struct team_gsetter_ctx ctx;
2469                         struct team_option_inst_info *opt_inst_info;
2470                         int tmp_ifindex;
2471 
2472                         opt_inst_info = &opt_inst->info;
2473                         tmp_ifindex = opt_inst_info->port ?
2474                                       opt_inst_info->port->dev->ifindex : 0;
2475                         if (option->type != opt_type ||
2476                             strcmp(option->name, opt_name) ||
2477                             tmp_ifindex != opt_port_ifindex ||
2478                             (option->array_size && !opt_is_array) ||
2479                             opt_inst_info->array_index != opt_array_index)
2480                                 continue;
2481                         opt_found = true;
2482                         ctx.info = opt_inst_info;
2483                         switch (opt_type) {
2484                         case TEAM_OPTION_TYPE_U32:
2485                                 ctx.data.u32_val = nla_get_u32(attr_data);
2486                                 break;
2487                         case TEAM_OPTION_TYPE_STRING:
2488                                 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2489                                         err = -EINVAL;
2490                                         goto team_put;
2491                                 }
2492                                 ctx.data.str_val = nla_data(attr_data);
2493                                 break;
2494                         case TEAM_OPTION_TYPE_BINARY:
2495                                 ctx.data.bin_val.len = nla_len(attr_data);
2496                                 ctx.data.bin_val.ptr = nla_data(attr_data);
2497                                 break;
2498                         case TEAM_OPTION_TYPE_BOOL:
2499                                 ctx.data.bool_val = attr_data ? true : false;
2500                                 break;
2501                         case TEAM_OPTION_TYPE_S32:
2502                                 ctx.data.s32_val = nla_get_s32(attr_data);
2503                                 break;
2504                         default:
2505                                 BUG();
2506                         }
2507                         err = team_option_set(team, opt_inst, &ctx);
2508                         if (err)
2509                                 goto team_put;
2510                         opt_inst->changed = true;
2511                         list_add(&opt_inst->tmp_list, &opt_inst_list);
2512                 }
2513                 if (!opt_found) {
2514                         err = -ENOENT;
2515                         goto team_put;
2516                 }
2517         }
2518 
2519         err = team_nl_send_event_options_get(team, &opt_inst_list);
2520 
2521 team_put:
2522         team_nl_team_put(team);
2523 
2524         return err;
2525 }
2526 
2527 static int team_nl_fill_one_port_get(struct sk_buff *skb,
2528                                      struct team_port *port)
2529 {
2530         struct nlattr *port_item;
2531 
2532         port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
2533         if (!port_item)
2534                 goto nest_cancel;
2535         if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2536                 goto nest_cancel;
2537         if (port->changed) {
2538                 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2539                         goto nest_cancel;
2540                 port->changed = false;
2541         }
2542         if ((port->removed &&
2543              nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2544             (port->state.linkup &&
2545              nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2546             nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2547             nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2548                 goto nest_cancel;
2549         nla_nest_end(skb, port_item);
2550         return 0;
2551 
2552 nest_cancel:
2553         nla_nest_cancel(skb, port_item);
2554         return -EMSGSIZE;
2555 }
2556 
2557 static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2558                                       int flags, team_nl_send_func_t *send_func,
2559                                       struct team_port *one_port)
2560 {
2561         struct nlattr *port_list;
2562         struct nlmsghdr *nlh;
2563         void *hdr;
2564         struct team_port *port;
2565         int err;
2566         struct sk_buff *skb = NULL;
2567         bool incomplete;
2568         int i;
2569 
2570         port = list_first_entry_or_null(&team->port_list,
2571                                         struct team_port, list);
2572 
2573 start_again:
2574         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2575         if (err)
2576                 return err;
2577 
2578         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2579                           TEAM_CMD_PORT_LIST_GET);
2580         if (!hdr)
2581                 return -EMSGSIZE;
2582 
2583         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2584                 goto nla_put_failure;
2585         port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
2586         if (!port_list)
2587                 goto nla_put_failure;
2588 
2589         i = 0;
2590         incomplete = false;
2591 
2592         /* If one port is selected, the caller wants to send a port list containing
2593          * only this port. Otherwise go through all listed ports and send them all.
2594          */
2595         if (one_port) {
2596                 err = team_nl_fill_one_port_get(skb, one_port);
2597                 if (err)
2598                         goto errout;
2599         } else if (port) {
2600                 list_for_each_entry_from(port, &team->port_list, list) {
2601                         err = team_nl_fill_one_port_get(skb, port);
2602                         if (err) {
2603                                 if (err == -EMSGSIZE) {
2604                                         if (!i)
2605                                                 goto errout;
2606                                         incomplete = true;
2607                                         break;
2608                                 }
2609                                 goto errout;
2610                         }
2611                         i++;
2612                 }
2613         }
2614 
2615         nla_nest_end(skb, port_list);
2616         genlmsg_end(skb, hdr);
2617         if (incomplete)
2618                 goto start_again;
2619 
2620 send_done:
2621         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2622         if (!nlh) {
2623                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2624                 if (err)
2625                         goto errout;
2626                 goto send_done;
2627         }
2628 
2629         return send_func(skb, team, portid);
2630 
2631 nla_put_failure:
2632         err = -EMSGSIZE;
2633 errout:
2634         genlmsg_cancel(skb, hdr);
2635         nlmsg_free(skb);
2636         return err;
2637 }
2638 
2639 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2640                                      struct genl_info *info)
2641 {
2642         struct team *team;
2643         int err;
2644 
2645         team = team_nl_team_get(info);
2646         if (!team)
2647                 return -EINVAL;
2648 
2649         err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2650                                          NLM_F_ACK, team_nl_send_unicast, NULL);
2651 
2652         team_nl_team_put(team);
2653 
2654         return err;
2655 }
2656 
2657 static const struct genl_ops team_nl_ops[] = {
2658         {
2659                 .cmd = TEAM_CMD_NOOP,
2660                 .doit = team_nl_cmd_noop,
2661                 .policy = team_nl_policy,
2662         },
2663         {
2664                 .cmd = TEAM_CMD_OPTIONS_SET,
2665                 .doit = team_nl_cmd_options_set,
2666                 .policy = team_nl_policy,
2667                 .flags = GENL_ADMIN_PERM,
2668         },
2669         {
2670                 .cmd = TEAM_CMD_OPTIONS_GET,
2671                 .doit = team_nl_cmd_options_get,
2672                 .policy = team_nl_policy,
2673                 .flags = GENL_ADMIN_PERM,
2674         },
2675         {
2676                 .cmd = TEAM_CMD_PORT_LIST_GET,
2677                 .doit = team_nl_cmd_port_list_get,
2678                 .policy = team_nl_policy,
2679                 .flags = GENL_ADMIN_PERM,
2680         },
2681 };
2682 
2683 static const struct genl_multicast_group team_nl_mcgrps[] = {
2684         { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2685 };
2686 
2687 static int team_nl_send_multicast(struct sk_buff *skb,
2688                                   struct team *team, u32 portid)
2689 {
2690         return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2691                                        skb, 0, 0, GFP_KERNEL);
2692 }
2693 
2694 static int team_nl_send_event_options_get(struct team *team,
2695                                           struct list_head *sel_opt_inst_list)
2696 {
2697         return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2698                                         sel_opt_inst_list);
2699 }
2700 
2701 static int team_nl_send_event_port_get(struct team *team,
2702                                        struct team_port *port)
2703 {
2704         return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2705                                           port);
2706 }
2707 
2708 static int team_nl_init(void)
2709 {
2710         return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
2711                                                     team_nl_mcgrps);
2712 }
2713 
2714 static void team_nl_fini(void)
2715 {
2716         genl_unregister_family(&team_nl_family);
2717 }
2718 
2719 
2720 /******************
2721  * Change checkers
2722  ******************/
2723 
2724 static void __team_options_change_check(struct team *team)
2725 {
2726         int err;
2727         struct team_option_inst *opt_inst;
2728         LIST_HEAD(sel_opt_inst_list);
2729 
2730         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2731                 if (opt_inst->changed)
2732                         list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2733         }
2734         err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2735         if (err && err != -ESRCH)
2736                 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2737                             err);
2738 }
2739 
2740 /* rtnl lock is held */
2741 
2742 static void __team_port_change_send(struct team_port *port, bool linkup)
2743 {
2744         int err;
2745 
2746         port->changed = true;
2747         port->state.linkup = linkup;
2748         team_refresh_port_linkup(port);
2749         if (linkup) {
2750                 struct ethtool_cmd ecmd;
2751 
2752                 err = __ethtool_get_settings(port->dev, &ecmd);
2753                 if (!err) {
2754                         port->state.speed = ethtool_cmd_speed(&ecmd);
2755                         port->state.duplex = ecmd.duplex;
2756                         goto send_event;
2757                 }
2758         }
2759         port->state.speed = 0;
2760         port->state.duplex = 0;
2761 
2762 send_event:
2763         err = team_nl_send_event_port_get(port->team, port);
2764         if (err && err != -ESRCH)
2765                 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2766                             port->dev->name, err);
2767 
2768 }
2769 
2770 static void __team_carrier_check(struct team *team)
2771 {
2772         struct team_port *port;
2773         bool team_linkup;
2774 
2775         if (team->user_carrier_enabled)
2776                 return;
2777 
2778         team_linkup = false;
2779         list_for_each_entry(port, &team->port_list, list) {
2780                 if (port->linkup) {
2781                         team_linkup = true;
2782                         break;
2783                 }
2784         }
2785 
2786         if (team_linkup)
2787                 netif_carrier_on(team->dev);
2788         else
2789                 netif_carrier_off(team->dev);
2790 }
2791 
2792 static void __team_port_change_check(struct team_port *port, bool linkup)
2793 {
2794         if (port->state.linkup != linkup)
2795                 __team_port_change_send(port, linkup);
2796         __team_carrier_check(port->team);
2797 }
2798 
2799 static void __team_port_change_port_added(struct team_port *port, bool linkup)
2800 {
2801         __team_port_change_send(port, linkup);
2802         __team_carrier_check(port->team);
2803 }
2804 
2805 static void __team_port_change_port_removed(struct team_port *port)
2806 {
2807         port->removed = true;
2808         __team_port_change_send(port, false);
2809         __team_carrier_check(port->team);
2810 }
2811 
2812 static void team_port_change_check(struct team_port *port, bool linkup)
2813 {
2814         struct team *team = port->team;
2815 
2816         mutex_lock(&team->lock);
2817         __team_port_change_check(port, linkup);
2818         mutex_unlock(&team->lock);
2819 }
2820 
2821 
2822 /************************************
2823  * Net device notifier event handler
2824  ************************************/
2825 
2826 static int team_device_event(struct notifier_block *unused,
2827                              unsigned long event, void *ptr)
2828 {
2829         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2830         struct team_port *port;
2831 
2832         port = team_port_get_rtnl(dev);
2833         if (!port)
2834                 return NOTIFY_DONE;
2835 
2836         switch (event) {
2837         case NETDEV_UP:
2838                 if (netif_carrier_ok(dev))
2839                         team_port_change_check(port, true);
2840                 break;
2841         case NETDEV_DOWN:
2842                 team_port_change_check(port, false);
2843                 break;
2844         case NETDEV_CHANGE:
2845                 if (netif_running(port->dev))
2846                         team_port_change_check(port,
2847                                                !!netif_carrier_ok(port->dev));
2848                 break;
2849         case NETDEV_UNREGISTER:
2850                 team_del_slave(port->team->dev, dev);
2851                 break;
2852         case NETDEV_FEAT_CHANGE:
2853                 team_compute_features(port->team);
2854                 break;
2855         case NETDEV_PRECHANGEMTU:
2856                 /* Forbid changing the MTU of the underlying device */
2857                 if (!port->team->port_mtu_change_allowed)
2858                         return NOTIFY_BAD;
2859                 break;
2860         case NETDEV_PRE_TYPE_CHANGE:
2861                 /* Forbid changing the type of the underlying device */
2862                 return NOTIFY_BAD;
2863         case NETDEV_RESEND_IGMP:
2864                 /* Propagate to master device */
2865                 call_netdevice_notifiers(event, port->team->dev);
2866                 break;
2867         }
2868         return NOTIFY_DONE;
2869 }
2870 
2871 static struct notifier_block team_notifier_block __read_mostly = {
2872         .notifier_call = team_device_event,
2873 };
2874 
2875 
2876 /***********************
2877  * Module init and exit
2878  ***********************/
2879 
2880 static int __init team_module_init(void)
2881 {
2882         int err;
2883 
2884         register_netdevice_notifier(&team_notifier_block);
2885 
2886         err = rtnl_link_register(&team_link_ops);
2887         if (err)
2888                 goto err_rtnl_reg;
2889 
2890         err = team_nl_init();
2891         if (err)
2892                 goto err_nl_init;
2893 
2894         return 0;
2895 
2896 err_nl_init:
2897         rtnl_link_unregister(&team_link_ops);
2898 
2899 err_rtnl_reg:
2900         unregister_netdevice_notifier(&team_notifier_block);
2901 
2902         return err;
2903 }
2904 
2905 static void __exit team_module_exit(void)
2906 {
2907         team_nl_fini();
2908         rtnl_link_unregister(&team_link_ops);
2909         unregister_netdevice_notifier(&team_notifier_block);
2910 }
2911 
2912 module_init(team_module_init);
2913 module_exit(team_module_exit);
2914 
2915 MODULE_LICENSE("GPL v2");
2916 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
2917 MODULE_DESCRIPTION("Ethernet team device driver");
2918 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
2919 
