
Linux/drivers/net/team/team.c

  1 /*
  2  * drivers/net/team/team.c - Network team device driver
  3  * Copyright (c) 2011 Jiri Pirko <jpirko@redhat.com>
  4  *
  5  * This program is free software; you can redistribute it and/or modify
  6  * it under the terms of the GNU General Public License as published by
  7  * the Free Software Foundation; either version 2 of the License, or
  8  * (at your option) any later version.
  9  */
 10 
 11 #include <linux/kernel.h>
 12 #include <linux/types.h>
 13 #include <linux/module.h>
 14 #include <linux/init.h>
 15 #include <linux/slab.h>
 16 #include <linux/rcupdate.h>
 17 #include <linux/errno.h>
 18 #include <linux/ctype.h>
 19 #include <linux/notifier.h>
 20 #include <linux/netdevice.h>
 21 #include <linux/netpoll.h>
 22 #include <linux/if_vlan.h>
 23 #include <linux/if_arp.h>
 24 #include <linux/socket.h>
 25 #include <linux/etherdevice.h>
 26 #include <linux/rtnetlink.h>
 27 #include <net/rtnetlink.h>
 28 #include <net/genetlink.h>
 29 #include <net/netlink.h>
 30 #include <net/sch_generic.h>
 31 #include <net/switchdev.h>
 32 #include <generated/utsrelease.h>
 33 #include <linux/if_team.h>
 34 
 35 #define DRV_NAME "team"
 36 
 37 
 38 /**********
 39  * Helpers
 40  **********/
 41 
 42 #define team_port_exists(dev) (dev->priv_flags & IFF_TEAM_PORT)
 43 
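/*
 * A port netdevice's rx_handler_data points at its struct team_port.  The
 * _rcu variant below is meant for the RX fast path; the _rtnl variant is
 * used under RTNL and additionally checks IFF_TEAM_PORT, returning NULL
 * for devices that are not team ports.
 */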
 44 static struct team_port *team_port_get_rcu(const struct net_device *dev)
 45 {
 46         return rcu_dereference(dev->rx_handler_data);
 47 }
 48 
 49 static struct team_port *team_port_get_rtnl(const struct net_device *dev)
 50 {
 51         struct team_port *port = rtnl_dereference(dev->rx_handler_data);
 52 
 53         return team_port_exists(dev) ? port : NULL;
 54 }
 55 
 56 /*
 57  * The ability to change the device address of an open port device is checked
 58  * in team_port_add, so callers need not check this function's return value.
 59  */
 60 static int __set_port_dev_addr(struct net_device *port_dev,
 61                                const unsigned char *dev_addr)
 62 {
 63         struct sockaddr addr;
 64 
 65         memcpy(addr.sa_data, dev_addr, port_dev->addr_len);
 66         addr.sa_family = port_dev->type;
 67         return dev_set_mac_address(port_dev, &addr);
 68 }
 69 
 70 static int team_port_set_orig_dev_addr(struct team_port *port)
 71 {
 72         return __set_port_dev_addr(port->dev, port->orig.dev_addr);
 73 }
 74 
 75 static int team_port_set_team_dev_addr(struct team *team,
 76                                        struct team_port *port)
 77 {
 78         return __set_port_dev_addr(port->dev, team->dev->dev_addr);
 79 }
 80 
 81 int team_modeop_port_enter(struct team *team, struct team_port *port)
 82 {
 83         return team_port_set_team_dev_addr(team, port);
 84 }
 85 EXPORT_SYMBOL(team_modeop_port_enter);
 86 
 87 void team_modeop_port_change_dev_addr(struct team *team,
 88                                       struct team_port *port)
 89 {
 90         team_port_set_team_dev_addr(team, port);
 91 }
 92 EXPORT_SYMBOL(team_modeop_port_change_dev_addr);
 93 
 94 static void team_lower_state_changed(struct team_port *port)
 95 {
 96         struct netdev_lag_lower_state_info info;
 97 
 98         info.link_up = port->linkup;
 99         info.tx_enabled = team_port_enabled(port);
100         netdev_lower_state_changed(port->dev, &info);
101 }
102 
103 static void team_refresh_port_linkup(struct team_port *port)
104 {
105         bool new_linkup = port->user.linkup_enabled ? port->user.linkup :
106                                                       port->state.linkup;
107 
108         if (port->linkup != new_linkup) {
109                 port->linkup = new_linkup;
110                 team_lower_state_changed(port);
111         }
112 }
113 
114 
115 /*******************
116  * Options handling
117  *******************/
118 
119 struct team_option_inst { /* One for each option instance */
120         struct list_head list;
121         struct list_head tmp_list;
122         struct team_option *option;
123         struct team_option_inst_info info;
124         bool changed;
125         bool removed;
126 };
127 
128 static struct team_option *__team_find_option(struct team *team,
129                                               const char *opt_name)
130 {
131         struct team_option *option;
132 
133         list_for_each_entry(option, &team->option_list, list) {
134                 if (strcmp(option->name, opt_name) == 0)
135                         return option;
136         }
137         return NULL;
138 }
139 
140 static void __team_option_inst_del(struct team_option_inst *opt_inst)
141 {
142         list_del(&opt_inst->list);
143         kfree(opt_inst);
144 }
145 
146 static void __team_option_inst_del_option(struct team *team,
147                                           struct team_option *option)
148 {
149         struct team_option_inst *opt_inst, *tmp;
150 
151         list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
152                 if (opt_inst->option == option)
153                         __team_option_inst_del(opt_inst);
154         }
155 }
156 
157 static int __team_option_inst_add(struct team *team, struct team_option *option,
158                                   struct team_port *port)
159 {
160         struct team_option_inst *opt_inst;
161         unsigned int array_size;
162         unsigned int i;
163         int err;
164 
165         array_size = option->array_size;
166         if (!array_size)
167                 array_size = 1; /* No array but still need one instance */
168 
169         for (i = 0; i < array_size; i++) {
170                 opt_inst = kmalloc(sizeof(*opt_inst), GFP_KERNEL);
171                 if (!opt_inst)
172                         return -ENOMEM;
173                 opt_inst->option = option;
174                 opt_inst->info.port = port;
175                 opt_inst->info.array_index = i;
176                 opt_inst->changed = true;
177                 opt_inst->removed = false;
178                 list_add_tail(&opt_inst->list, &team->option_inst_list);
179                 if (option->init) {
180                         err = option->init(team, &opt_inst->info);
181                         if (err)
182                                 return err;
183                 }
184 
185         }
186         return 0;
187 }
188 
189 static int __team_option_inst_add_option(struct team *team,
190                                          struct team_option *option)
191 {
192         int err;
193 
194         if (!option->per_port) {
195                 err = __team_option_inst_add(team, option, NULL);
196                 if (err)
197                         goto inst_del_option;
198         }
199         return 0;
200 
201 inst_del_option:
202         __team_option_inst_del_option(team, option);
203         return err;
204 }
205 
206 static void __team_option_inst_mark_removed_option(struct team *team,
207                                                    struct team_option *option)
208 {
209         struct team_option_inst *opt_inst;
210 
211         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
212                 if (opt_inst->option == option) {
213                         opt_inst->changed = true;
214                         opt_inst->removed = true;
215                 }
216         }
217 }
218 
219 static void __team_option_inst_del_port(struct team *team,
220                                         struct team_port *port)
221 {
222         struct team_option_inst *opt_inst, *tmp;
223 
224         list_for_each_entry_safe(opt_inst, tmp, &team->option_inst_list, list) {
225                 if (opt_inst->option->per_port &&
226                     opt_inst->info.port == port)
227                         __team_option_inst_del(opt_inst);
228         }
229 }
230 
231 static int __team_option_inst_add_port(struct team *team,
232                                        struct team_port *port)
233 {
234         struct team_option *option;
235         int err;
236 
237         list_for_each_entry(option, &team->option_list, list) {
238                 if (!option->per_port)
239                         continue;
240                 err = __team_option_inst_add(team, option, port);
241                 if (err)
242                         goto inst_del_port;
243         }
244         return 0;
245 
246 inst_del_port:
247         __team_option_inst_del_port(team, port);
248         return err;
249 }
250 
251 static void __team_option_inst_mark_removed_port(struct team *team,
252                                                  struct team_port *port)
253 {
254         struct team_option_inst *opt_inst;
255 
256         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
257                 if (opt_inst->info.port == port) {
258                         opt_inst->changed = true;
259                         opt_inst->removed = true;
260                 }
261         }
262 }
263 
264 static int __team_options_register(struct team *team,
265                                    const struct team_option *option,
266                                    size_t option_count)
267 {
268         int i;
269         struct team_option **dst_opts;
270         int err;
271 
272         dst_opts = kzalloc(sizeof(struct team_option *) * option_count,
273                            GFP_KERNEL);
274         if (!dst_opts)
275                 return -ENOMEM;
276         for (i = 0; i < option_count; i++, option++) {
277                 if (__team_find_option(team, option->name)) {
278                         err = -EEXIST;
279                         goto alloc_rollback;
280                 }
281                 dst_opts[i] = kmemdup(option, sizeof(*option), GFP_KERNEL);
282                 if (!dst_opts[i]) {
283                         err = -ENOMEM;
284                         goto alloc_rollback;
285                 }
286         }
287 
288         for (i = 0; i < option_count; i++) {
289                 err = __team_option_inst_add_option(team, dst_opts[i]);
290                 if (err)
291                         goto inst_rollback;
292                 list_add_tail(&dst_opts[i]->list, &team->option_list);
293         }
294 
295         kfree(dst_opts);
296         return 0;
297 
298 inst_rollback:
299         for (i--; i >= 0; i--)
300                 __team_option_inst_del_option(team, dst_opts[i]);
301 
302         i = option_count - 1;
303 alloc_rollback:
304         for (i--; i >= 0; i--)
305                 kfree(dst_opts[i]);
306 
307         kfree(dst_opts);
308         return err;
309 }
310 
311 static void __team_options_mark_removed(struct team *team,
312                                         const struct team_option *option,
313                                         size_t option_count)
314 {
315         int i;
316 
317         for (i = 0; i < option_count; i++, option++) {
318                 struct team_option *del_opt;
319 
320                 del_opt = __team_find_option(team, option->name);
321                 if (del_opt)
322                         __team_option_inst_mark_removed_option(team, del_opt);
323         }
324 }
325 
326 static void __team_options_unregister(struct team *team,
327                                       const struct team_option *option,
328                                       size_t option_count)
329 {
330         int i;
331 
332         for (i = 0; i < option_count; i++, option++) {
333                 struct team_option *del_opt;
334 
335                 del_opt = __team_find_option(team, option->name);
336                 if (del_opt) {
337                         __team_option_inst_del_option(team, del_opt);
338                         list_del(&del_opt->list);
339                         kfree(del_opt);
340                 }
341         }
342 }
343 
344 static void __team_options_change_check(struct team *team);
345 
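/*
 * team_options_register()/team_options_unregister() are the entry points a
 * mode module uses to expose its own per-team or per-port options on top of
 * the generic ones below.  A minimal sketch, with hypothetical names, of how
 * a mode's init op might use it:
 *
 *	static const struct team_option foo_options[] = {
 *		{
 *			.name	= "foo_active_port",
 *			.type	= TEAM_OPTION_TYPE_U32,
 *			.getter	= foo_active_port_get,
 *			.setter	= foo_active_port_set,
 *		},
 *	};
 *
 *	err = team_options_register(team, foo_options, ARRAY_SIZE(foo_options));
 */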
346 int team_options_register(struct team *team,
347                           const struct team_option *option,
348                           size_t option_count)
349 {
350         int err;
351 
352         err = __team_options_register(team, option, option_count);
353         if (err)
354                 return err;
355         __team_options_change_check(team);
356         return 0;
357 }
358 EXPORT_SYMBOL(team_options_register);
359 
360 void team_options_unregister(struct team *team,
361                              const struct team_option *option,
362                              size_t option_count)
363 {
364         __team_options_mark_removed(team, option, option_count);
365         __team_options_change_check(team);
366         __team_options_unregister(team, option, option_count);
367 }
368 EXPORT_SYMBOL(team_options_unregister);
369 
370 static int team_option_get(struct team *team,
371                            struct team_option_inst *opt_inst,
372                            struct team_gsetter_ctx *ctx)
373 {
374         if (!opt_inst->option->getter)
375                 return -EOPNOTSUPP;
376         return opt_inst->option->getter(team, ctx);
377 }
378 
379 static int team_option_set(struct team *team,
380                            struct team_option_inst *opt_inst,
381                            struct team_gsetter_ctx *ctx)
382 {
383         if (!opt_inst->option->setter)
384                 return -EOPNOTSUPP;
385         return opt_inst->option->setter(team, ctx);
386 }
387 
388 void team_option_inst_set_change(struct team_option_inst_info *opt_inst_info)
389 {
390         struct team_option_inst *opt_inst;
391 
392         opt_inst = container_of(opt_inst_info, struct team_option_inst, info);
393         opt_inst->changed = true;
394 }
395 EXPORT_SYMBOL(team_option_inst_set_change);
396 
397 void team_options_change_check(struct team *team)
398 {
399         __team_options_change_check(team);
400 }
401 EXPORT_SYMBOL(team_options_change_check);
402 
403 
404 /****************
405  * Mode handling
406  ****************/
407 
408 static LIST_HEAD(mode_list);
409 static DEFINE_SPINLOCK(mode_list_lock);
410 
411 struct team_mode_item {
412         struct list_head list;
413         const struct team_mode *mode;
414 };
415 
416 static struct team_mode_item *__find_mode(const char *kind)
417 {
418         struct team_mode_item *mitem;
419 
420         list_for_each_entry(mitem, &mode_list, list) {
421                 if (strcmp(mitem->mode->kind, kind) == 0)
422                         return mitem;
423         }
424         return NULL;
425 }
426 
427 static bool is_good_mode_name(const char *name)
428 {
429         while (*name != '\0') {
430                 if (!isalpha(*name) && !isdigit(*name) && *name != '_')
431                         return false;
432                 name++;
433         }
434         return true;
435 }
436 
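/*
 * Team modes live in separate modules and register themselves here.  A
 * minimal sketch, assuming a hypothetical "foo" mode module:
 *
 *	static const struct team_mode foo_mode = {
 *		.kind		= "foo",
 *		.owner		= THIS_MODULE,
 *		.priv_size	= sizeof(struct foo_priv),
 *		.ops		= &foo_mode_ops,
 *	};
 *
 *	module init:	return team_mode_register(&foo_mode);
 *	module exit:	team_mode_unregister(&foo_mode);
 *
 * team_mode_get() below loads "team-mode-<kind>" on demand via
 * request_module(), so such a module is expected to provide the matching
 * module alias.
 */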
437 int team_mode_register(const struct team_mode *mode)
438 {
439         int err = 0;
440         struct team_mode_item *mitem;
441 
442         if (!is_good_mode_name(mode->kind) ||
443             mode->priv_size > TEAM_MODE_PRIV_SIZE)
444                 return -EINVAL;
445 
446         mitem = kmalloc(sizeof(*mitem), GFP_KERNEL);
447         if (!mitem)
448                 return -ENOMEM;
449 
450         spin_lock(&mode_list_lock);
451         if (__find_mode(mode->kind)) {
452                 err = -EEXIST;
453                 kfree(mitem);
454                 goto unlock;
455         }
456         mitem->mode = mode;
457         list_add_tail(&mitem->list, &mode_list);
458 unlock:
459         spin_unlock(&mode_list_lock);
460         return err;
461 }
462 EXPORT_SYMBOL(team_mode_register);
463 
464 void team_mode_unregister(const struct team_mode *mode)
465 {
466         struct team_mode_item *mitem;
467 
468         spin_lock(&mode_list_lock);
469         mitem = __find_mode(mode->kind);
470         if (mitem) {
471                 list_del_init(&mitem->list);
472                 kfree(mitem);
473         }
474         spin_unlock(&mode_list_lock);
475 }
476 EXPORT_SYMBOL(team_mode_unregister);
477 
478 static const struct team_mode *team_mode_get(const char *kind)
479 {
480         struct team_mode_item *mitem;
481         const struct team_mode *mode = NULL;
482 
483         spin_lock(&mode_list_lock);
484         mitem = __find_mode(kind);
485         if (!mitem) {
486                 spin_unlock(&mode_list_lock);
487                 request_module("team-mode-%s", kind);
488                 spin_lock(&mode_list_lock);
489                 mitem = __find_mode(kind);
490         }
491         if (mitem) {
492                 mode = mitem->mode;
493                 if (!try_module_get(mode->owner))
494                         mode = NULL;
495         }
496 
497         spin_unlock(&mode_list_lock);
498         return mode;
499 }
500 
501 static void team_mode_put(const struct team_mode *mode)
502 {
503         module_put(mode->owner);
504 }
505 
506 static bool team_dummy_transmit(struct team *team, struct sk_buff *skb)
507 {
508         dev_kfree_skb_any(skb);
509         return false;
510 }
511 
512 static rx_handler_result_t team_dummy_receive(struct team *team,
513                                               struct team_port *port,
514                                               struct sk_buff *skb)
515 {
516         return RX_HANDLER_ANOTHER;
517 }
518 
519 static const struct team_mode __team_no_mode = {
520         .kind           = "*NOMODE*",
521 };
522 
523 static bool team_is_mode_set(struct team *team)
524 {
525         return team->mode != &__team_no_mode;
526 }
527 
528 static void team_set_no_mode(struct team *team)
529 {
530         team->user_carrier_enabled = false;
531         team->mode = &__team_no_mode;
532 }
533 
534 static void team_adjust_ops(struct team *team)
535 {
536         /*
537          * To avoid checks in rx/tx skb paths, ensure here that non-null and
538          * correct ops are always set.
539          */
540 
541         if (!team->en_port_count || !team_is_mode_set(team) ||
542             !team->mode->ops->transmit)
543                 team->ops.transmit = team_dummy_transmit;
544         else
545                 team->ops.transmit = team->mode->ops->transmit;
546 
547         if (!team->en_port_count || !team_is_mode_set(team) ||
548             !team->mode->ops->receive)
549                 team->ops.receive = team_dummy_receive;
550         else
551                 team->ops.receive = team->mode->ops->receive;
552 }
553 
 554 /*
 555  * We can rely on the fact that no port is present at the time of a mode
 556  * change. Therefore no packets are in flight, so there is no need to set
 557  * the mode operations in any special way.
 558  */
559 static int __team_change_mode(struct team *team,
560                               const struct team_mode *new_mode)
561 {
562         /* Check if mode was previously set and do cleanup if so */
563         if (team_is_mode_set(team)) {
564                 void (*exit_op)(struct team *team) = team->ops.exit;
565 
566                 /* Clear ops area so no callback is called any longer */
567                 memset(&team->ops, 0, sizeof(struct team_mode_ops));
568                 team_adjust_ops(team);
569 
570                 if (exit_op)
571                         exit_op(team);
572                 team_mode_put(team->mode);
573                 team_set_no_mode(team);
574                 /* zero private data area */
575                 memset(&team->mode_priv, 0,
576                        sizeof(struct team) - offsetof(struct team, mode_priv));
577         }
578 
579         if (!new_mode)
580                 return 0;
581 
582         if (new_mode->ops->init) {
583                 int err;
584 
585                 err = new_mode->ops->init(team);
586                 if (err)
587                         return err;
588         }
589 
590         team->mode = new_mode;
591         memcpy(&team->ops, new_mode->ops, sizeof(struct team_mode_ops));
592         team_adjust_ops(team);
593 
594         return 0;
595 }
596 
597 static int team_change_mode(struct team *team, const char *kind)
598 {
599         const struct team_mode *new_mode;
600         struct net_device *dev = team->dev;
601         int err;
602 
603         if (!list_empty(&team->port_list)) {
604                 netdev_err(dev, "No ports can be present during mode change\n");
605                 return -EBUSY;
606         }
607 
608         if (team_is_mode_set(team) && strcmp(team->mode->kind, kind) == 0) {
609                 netdev_err(dev, "Unable to change to the same mode the team is in\n");
610                 return -EINVAL;
611         }
612 
613         new_mode = team_mode_get(kind);
614         if (!new_mode) {
615                 netdev_err(dev, "Mode \"%s\" not found\n", kind);
616                 return -EINVAL;
617         }
618 
619         err = __team_change_mode(team, new_mode);
620         if (err) {
621                 netdev_err(dev, "Failed to change to mode \"%s\"\n", kind);
622                 team_mode_put(new_mode);
623                 return err;
624         }
625 
626         netdev_info(dev, "Mode changed to \"%s\"\n", kind);
627         return 0;
628 }
629 
630 
631 /*********************
632  * Peers notification
633  *********************/
634 
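/*
 * For each trigger, NETDEV_NOTIFY_PEERS is emitted notify_peers.count times,
 * notify_peers.interval msecs apart, from delayed work.  The work needs RTNL;
 * when rtnl_trylock() fails it reschedules itself immediately instead of
 * blocking the workqueue.
 */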
635 static void team_notify_peers_work(struct work_struct *work)
636 {
637         struct team *team;
638         int val;
639 
640         team = container_of(work, struct team, notify_peers.dw.work);
641 
642         if (!rtnl_trylock()) {
643                 schedule_delayed_work(&team->notify_peers.dw, 0);
644                 return;
645         }
646         val = atomic_dec_if_positive(&team->notify_peers.count_pending);
647         if (val < 0) {
648                 rtnl_unlock();
649                 return;
650         }
651         call_netdevice_notifiers(NETDEV_NOTIFY_PEERS, team->dev);
652         rtnl_unlock();
653         if (val)
654                 schedule_delayed_work(&team->notify_peers.dw,
655                                       msecs_to_jiffies(team->notify_peers.interval));
656 }
657 
658 static void team_notify_peers(struct team *team)
659 {
660         if (!team->notify_peers.count || !netif_running(team->dev))
661                 return;
662         atomic_add(team->notify_peers.count, &team->notify_peers.count_pending);
663         schedule_delayed_work(&team->notify_peers.dw, 0);
664 }
665 
666 static void team_notify_peers_init(struct team *team)
667 {
668         INIT_DELAYED_WORK(&team->notify_peers.dw, team_notify_peers_work);
669 }
670 
671 static void team_notify_peers_fini(struct team *team)
672 {
673         cancel_delayed_work_sync(&team->notify_peers.dw);
674 }
675 
676 
677 /*******************************
678  * Send multicast group rejoins
679  *******************************/
680 
681 static void team_mcast_rejoin_work(struct work_struct *work)
682 {
683         struct team *team;
684         int val;
685 
686         team = container_of(work, struct team, mcast_rejoin.dw.work);
687 
688         if (!rtnl_trylock()) {
689                 schedule_delayed_work(&team->mcast_rejoin.dw, 0);
690                 return;
691         }
692         val = atomic_dec_if_positive(&team->mcast_rejoin.count_pending);
693         if (val < 0) {
694                 rtnl_unlock();
695                 return;
696         }
697         call_netdevice_notifiers(NETDEV_RESEND_IGMP, team->dev);
698         rtnl_unlock();
699         if (val)
700                 schedule_delayed_work(&team->mcast_rejoin.dw,
701                                       msecs_to_jiffies(team->mcast_rejoin.interval));
702 }
703 
704 static void team_mcast_rejoin(struct team *team)
705 {
706         if (!team->mcast_rejoin.count || !netif_running(team->dev))
707                 return;
708         atomic_add(team->mcast_rejoin.count, &team->mcast_rejoin.count_pending);
709         schedule_delayed_work(&team->mcast_rejoin.dw, 0);
710 }
711 
712 static void team_mcast_rejoin_init(struct team *team)
713 {
714         INIT_DELAYED_WORK(&team->mcast_rejoin.dw, team_mcast_rejoin_work);
715 }
716 
717 static void team_mcast_rejoin_fini(struct team *team)
718 {
719         cancel_delayed_work_sync(&team->mcast_rejoin.dw);
720 }
721 
722 
723 /************************
724  * Rx path frame handler
725  ************************/
726 
727 /* note: already called with rcu_read_lock */
728 static rx_handler_result_t team_handle_frame(struct sk_buff **pskb)
729 {
730         struct sk_buff *skb = *pskb;
731         struct team_port *port;
732         struct team *team;
733         rx_handler_result_t res;
734 
735         skb = skb_share_check(skb, GFP_ATOMIC);
736         if (!skb)
737                 return RX_HANDLER_CONSUMED;
738 
739         *pskb = skb;
740 
741         port = team_port_get_rcu(skb->dev);
742         team = port->team;
743         if (!team_port_enabled(port)) {
744                 /* allow exact match delivery for disabled ports */
745                 res = RX_HANDLER_EXACT;
746         } else {
747                 res = team->ops.receive(team, port, skb);
748         }
749         if (res == RX_HANDLER_ANOTHER) {
750                 struct team_pcpu_stats *pcpu_stats;
751 
752                 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
753                 u64_stats_update_begin(&pcpu_stats->syncp);
754                 pcpu_stats->rx_packets++;
755                 pcpu_stats->rx_bytes += skb->len;
756                 if (skb->pkt_type == PACKET_MULTICAST)
757                         pcpu_stats->rx_multicast++;
758                 u64_stats_update_end(&pcpu_stats->syncp);
759 
760                 skb->dev = team->dev;
761         } else if (res == RX_HANDLER_EXACT) {
762                 this_cpu_inc(team->pcpu_stats->rx_nohandler);
763         } else {
764                 this_cpu_inc(team->pcpu_stats->rx_dropped);
765         }
766 
767         return res;
768 }
769 
770 
771 /*************************************
772  * Multiqueue Tx port select override
773  *************************************/
774 
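/*
 * qom_lists is an array of per-tx-queue port lists, one for every queue
 * except queue 0 (a queue_id of 0 means "no override").  Each list holds
 * the ports bound to that queue_id, kept in priority order, so that
 * team_queue_override_transmit() can try them in turn.
 */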
775 static int team_queue_override_init(struct team *team)
776 {
777         struct list_head *listarr;
778         unsigned int queue_cnt = team->dev->num_tx_queues - 1;
779         unsigned int i;
780 
781         if (!queue_cnt)
782                 return 0;
783         listarr = kmalloc(sizeof(struct list_head) * queue_cnt, GFP_KERNEL);
784         if (!listarr)
785                 return -ENOMEM;
786         team->qom_lists = listarr;
787         for (i = 0; i < queue_cnt; i++)
788                 INIT_LIST_HEAD(listarr++);
789         return 0;
790 }
791 
792 static void team_queue_override_fini(struct team *team)
793 {
794         kfree(team->qom_lists);
795 }
796 
797 static struct list_head *__team_get_qom_list(struct team *team, u16 queue_id)
798 {
799         return &team->qom_lists[queue_id - 1];
800 }
801 
802 /*
803  * note: already called with rcu_read_lock
804  */
805 static bool team_queue_override_transmit(struct team *team, struct sk_buff *skb)
806 {
807         struct list_head *qom_list;
808         struct team_port *port;
809 
810         if (!team->queue_override_enabled || !skb->queue_mapping)
811                 return false;
812         qom_list = __team_get_qom_list(team, skb->queue_mapping);
813         list_for_each_entry_rcu(port, qom_list, qom_list) {
814                 if (!team_dev_queue_xmit(team, port, skb))
815                         return true;
816         }
817         return false;
818 }
819 
820 static void __team_queue_override_port_del(struct team *team,
821                                            struct team_port *port)
822 {
823         if (!port->queue_id)
824                 return;
825         list_del_rcu(&port->qom_list);
826 }
827 
828 static bool team_queue_override_port_has_gt_prio_than(struct team_port *port,
829                                                       struct team_port *cur)
830 {
831         if (port->priority < cur->priority)
832                 return true;
833         if (port->priority > cur->priority)
834                 return false;
835         if (port->index < cur->index)
836                 return true;
837         return false;
838 }
839 
840 static void __team_queue_override_port_add(struct team *team,
841                                            struct team_port *port)
842 {
843         struct team_port *cur;
844         struct list_head *qom_list;
845         struct list_head *node;
846 
847         if (!port->queue_id)
848                 return;
849         qom_list = __team_get_qom_list(team, port->queue_id);
850         node = qom_list;
851         list_for_each_entry(cur, qom_list, qom_list) {
852                 if (team_queue_override_port_has_gt_prio_than(port, cur))
853                         break;
854                 node = &cur->qom_list;
855         }
856         list_add_tail_rcu(&port->qom_list, node);
857 }
858 
859 static void __team_queue_override_enabled_check(struct team *team)
860 {
861         struct team_port *port;
862         bool enabled = false;
863 
864         list_for_each_entry(port, &team->port_list, list) {
865                 if (port->queue_id) {
866                         enabled = true;
867                         break;
868                 }
869         }
870         if (enabled == team->queue_override_enabled)
871                 return;
872         netdev_dbg(team->dev, "%s queue override\n",
873                    enabled ? "Enabling" : "Disabling");
874         team->queue_override_enabled = enabled;
875 }
876 
877 static void team_queue_override_port_prio_changed(struct team *team,
878                                                   struct team_port *port)
879 {
880         if (!port->queue_id || team_port_enabled(port))
881                 return;
882         __team_queue_override_port_del(team, port);
883         __team_queue_override_port_add(team, port);
884         __team_queue_override_enabled_check(team);
885 }
886 
887 static void team_queue_override_port_change_queue_id(struct team *team,
888                                                      struct team_port *port,
889                                                      u16 new_queue_id)
890 {
891         if (team_port_enabled(port)) {
892                 __team_queue_override_port_del(team, port);
893                 port->queue_id = new_queue_id;
894                 __team_queue_override_port_add(team, port);
895                 __team_queue_override_enabled_check(team);
896         } else {
897                 port->queue_id = new_queue_id;
898         }
899 }
900 
901 static void team_queue_override_port_add(struct team *team,
902                                          struct team_port *port)
903 {
904         __team_queue_override_port_add(team, port);
905         __team_queue_override_enabled_check(team);
906 }
907 
908 static void team_queue_override_port_del(struct team *team,
909                                          struct team_port *port)
910 {
911         __team_queue_override_port_del(team, port);
912         __team_queue_override_enabled_check(team);
913 }
914 
915 
916 /****************
917  * Port handling
918  ****************/
919 
920 static bool team_port_find(const struct team *team,
921                            const struct team_port *port)
922 {
923         struct team_port *cur;
924 
925         list_for_each_entry(cur, &team->port_list, list)
926                 if (cur == port)
927                         return true;
928         return false;
929 }
930 
 931 /*
 932  * Enable/disable a port by adding it to the enabled-port hashlist and
 933  * setting port->index (this may be racy, so a reader could see a stale
 934  * index while processing an in-flight packet, but that is not a problem).
 935  * Writes are guarded by team->lock.
 936  */
937 static void team_port_enable(struct team *team,
938                              struct team_port *port)
939 {
940         if (team_port_enabled(port))
941                 return;
942         port->index = team->en_port_count++;
943         hlist_add_head_rcu(&port->hlist,
944                            team_port_index_hash(team, port->index));
945         team_adjust_ops(team);
946         team_queue_override_port_add(team, port);
947         if (team->ops.port_enabled)
948                 team->ops.port_enabled(team, port);
949         team_notify_peers(team);
950         team_mcast_rejoin(team);
951         team_lower_state_changed(port);
952 }
953 
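/*
 * Close the hole left in the index space by a removed port: every enabled
 * port with a higher index is rehashed one slot down so that indices stay
 * dense in [0, en_port_count).
 */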
954 static void __reconstruct_port_hlist(struct team *team, int rm_index)
955 {
956         int i;
957         struct team_port *port;
958 
959         for (i = rm_index + 1; i < team->en_port_count; i++) {
960                 port = team_get_port_by_index(team, i);
961                 hlist_del_rcu(&port->hlist);
962                 port->index--;
963                 hlist_add_head_rcu(&port->hlist,
964                                    team_port_index_hash(team, port->index));
965         }
966 }
967 
968 static void team_port_disable(struct team *team,
969                               struct team_port *port)
970 {
971         if (!team_port_enabled(port))
972                 return;
973         if (team->ops.port_disabled)
974                 team->ops.port_disabled(team, port);
975         hlist_del_rcu(&port->hlist);
976         __reconstruct_port_hlist(team, port->index);
977         port->index = -1;
978         team->en_port_count--;
979         team_queue_override_port_del(team, port);
980         team_adjust_ops(team);
981         team_notify_peers(team);
982         team_mcast_rejoin(team);
983         team_lower_state_changed(port);
984 }
985 
986 #define TEAM_VLAN_FEATURES (NETIF_F_HW_CSUM | NETIF_F_SG | \
987                             NETIF_F_FRAGLIST | NETIF_F_ALL_TSO | \
988                             NETIF_F_HIGHDMA | NETIF_F_LRO)
989 
990 #define TEAM_ENC_FEATURES       (NETIF_F_HW_CSUM | NETIF_F_SG | \
991                                  NETIF_F_RXCSUM | NETIF_F_ALL_TSO)
992 
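/*
 * Recompute the team device's vlan/encap feature sets and hard_header_len
 * from all ports.  IFF_XMIT_DST_RELEASE is kept only when every port has
 * both XMIT_DST_RELEASE flags set.
 */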
993 static void ___team_compute_features(struct team *team)
994 {
995         struct team_port *port;
996         u32 vlan_features = TEAM_VLAN_FEATURES & NETIF_F_ALL_FOR_ALL;
997         netdev_features_t enc_features  = TEAM_ENC_FEATURES;
998         unsigned short max_hard_header_len = ETH_HLEN;
999         unsigned int dst_release_flag = IFF_XMIT_DST_RELEASE |
1000                                         IFF_XMIT_DST_RELEASE_PERM;
1001 
1002         list_for_each_entry(port, &team->port_list, list) {
1003                 vlan_features = netdev_increment_features(vlan_features,
1004                                         port->dev->vlan_features,
1005                                         TEAM_VLAN_FEATURES);
1006                 enc_features =
1007                         netdev_increment_features(enc_features,
1008                                                   port->dev->hw_enc_features,
1009                                                   TEAM_ENC_FEATURES);
1010 
1011 
1012                 dst_release_flag &= port->dev->priv_flags;
1013                 if (port->dev->hard_header_len > max_hard_header_len)
1014                         max_hard_header_len = port->dev->hard_header_len;
1015         }
1016 
1017         team->dev->vlan_features = vlan_features;
1018         team->dev->hw_enc_features = enc_features | NETIF_F_GSO_ENCAP_ALL;
1019         team->dev->hard_header_len = max_hard_header_len;
1020 
1021         team->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
1022         if (dst_release_flag == (IFF_XMIT_DST_RELEASE | IFF_XMIT_DST_RELEASE_PERM))
1023                 team->dev->priv_flags |= IFF_XMIT_DST_RELEASE;
1024 }
1025 
1026 static void __team_compute_features(struct team *team)
1027 {
1028         ___team_compute_features(team);
1029         netdev_change_features(team->dev);
1030 }
1031 
1032 static void team_compute_features(struct team *team)
1033 {
1034         mutex_lock(&team->lock);
1035         ___team_compute_features(team);
1036         mutex_unlock(&team->lock);
1037         netdev_change_features(team->dev);
1038 }
1039 
1040 static int team_port_enter(struct team *team, struct team_port *port)
1041 {
1042         int err = 0;
1043 
1044         dev_hold(team->dev);
1045         if (team->ops.port_enter) {
1046                 err = team->ops.port_enter(team, port);
1047                 if (err) {
1048                         netdev_err(team->dev, "Device %s failed to enter team mode\n",
1049                                    port->dev->name);
1050                         goto err_port_enter;
1051                 }
1052         }
1053 
1054         return 0;
1055 
1056 err_port_enter:
1057         dev_put(team->dev);
1058 
1059         return err;
1060 }
1061 
1062 static void team_port_leave(struct team *team, struct team_port *port)
1063 {
1064         if (team->ops.port_leave)
1065                 team->ops.port_leave(team, port);
1066         dev_put(team->dev);
1067 }
1068 
1069 #ifdef CONFIG_NET_POLL_CONTROLLER
1070 static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1071 {
1072         struct netpoll *np;
1073         int err;
1074 
1075         if (!team->dev->npinfo)
1076                 return 0;
1077 
1078         np = kzalloc(sizeof(*np), GFP_KERNEL);
1079         if (!np)
1080                 return -ENOMEM;
1081 
1082         err = __netpoll_setup(np, port->dev);
1083         if (err) {
1084                 kfree(np);
1085                 return err;
1086         }
1087         port->np = np;
1088         return err;
1089 }
1090 
1091 static void team_port_disable_netpoll(struct team_port *port)
1092 {
1093         struct netpoll *np = port->np;
1094 
1095         if (!np)
1096                 return;
1097         port->np = NULL;
1098 
1099         /* Wait for transmitting packets to finish before freeing. */
1100         synchronize_rcu_bh();
1101         __netpoll_cleanup(np);
1102         kfree(np);
1103 }
1104 #else
1105 static int team_port_enable_netpoll(struct team *team, struct team_port *port)
1106 {
1107         return 0;
1108 }
1109 static void team_port_disable_netpoll(struct team_port *port)
1110 {
1111 }
1112 #endif
1113 
1114 static int team_upper_dev_link(struct team *team, struct team_port *port)
1115 {
1116         struct netdev_lag_upper_info lag_upper_info;
1117         int err;
1118 
1119         lag_upper_info.tx_type = team->mode->lag_tx_type;
1120         err = netdev_master_upper_dev_link(port->dev, team->dev, NULL,
1121                                            &lag_upper_info);
1122         if (err)
1123                 return err;
1124         port->dev->priv_flags |= IFF_TEAM_PORT;
1125         return 0;
1126 }
1127 
1128 static void team_upper_dev_unlink(struct team *team, struct team_port *port)
1129 {
1130         netdev_upper_dev_unlink(port->dev, team->dev);
1131         port->dev->priv_flags &= ~IFF_TEAM_PORT;
1132 }
1133 
1134 static void __team_port_change_port_added(struct team_port *port, bool linkup);
1135 static int team_dev_type_check_change(struct net_device *dev,
1136                                       struct net_device *port_dev);
1137 
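/*
 * Enslave port_dev to the team device.  After the sanity checks the
 * sequence is roughly: set MTU, save the original MAC, mode port_enter,
 * dev_open, sync uc/mc addresses and VLANs, netpoll, rx_handler, upper
 * link, per-port options, enable.  Each error label below unwinds the
 * steps completed so far in reverse order.
 */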
1138 static int team_port_add(struct team *team, struct net_device *port_dev)
1139 {
1140         struct net_device *dev = team->dev;
1141         struct team_port *port;
1142         char *portname = port_dev->name;
1143         int err;
1144 
1145         if (port_dev->flags & IFF_LOOPBACK) {
1146                 netdev_err(dev, "Device %s is loopback device. Loopback devices can't be added as a team port\n",
1147                            portname);
1148                 return -EINVAL;
1149         }
1150 
1151         if (team_port_exists(port_dev)) {
1152                 netdev_err(dev, "Device %s is already a port "
1153                                 "of a team device\n", portname);
1154                 return -EBUSY;
1155         }
1156 
1157         if (port_dev->features & NETIF_F_VLAN_CHALLENGED &&
1158             vlan_uses_dev(dev)) {
1159                 netdev_err(dev, "Device %s is VLAN challenged and team device has VLAN set up\n",
1160                            portname);
1161                 return -EPERM;
1162         }
1163 
1164         err = team_dev_type_check_change(dev, port_dev);
1165         if (err)
1166                 return err;
1167 
1168         if (port_dev->flags & IFF_UP) {
1169                 netdev_err(dev, "Device %s is up. Set it down before adding it as a team port\n",
1170                            portname);
1171                 return -EBUSY;
1172         }
1173 
1174         port = kzalloc(sizeof(struct team_port) + team->mode->port_priv_size,
1175                        GFP_KERNEL);
1176         if (!port)
1177                 return -ENOMEM;
1178 
1179         port->dev = port_dev;
1180         port->team = team;
1181         INIT_LIST_HEAD(&port->qom_list);
1182 
1183         port->orig.mtu = port_dev->mtu;
1184         err = dev_set_mtu(port_dev, dev->mtu);
1185         if (err) {
1186                 netdev_dbg(dev, "Error %d calling dev_set_mtu\n", err);
1187                 goto err_set_mtu;
1188         }
1189 
1190         memcpy(port->orig.dev_addr, port_dev->dev_addr, port_dev->addr_len);
1191 
1192         err = team_port_enter(team, port);
1193         if (err) {
1194                 netdev_err(dev, "Device %s failed to enter team mode\n",
1195                            portname);
1196                 goto err_port_enter;
1197         }
1198 
1199         err = dev_open(port_dev);
1200         if (err) {
1201                 netdev_dbg(dev, "Device %s opening failed\n",
1202                            portname);
1203                 goto err_dev_open;
1204         }
1205 
1206         netif_addr_lock_bh(dev);
1207         dev_uc_sync_multiple(port_dev, dev);
1208         dev_mc_sync_multiple(port_dev, dev);
1209         netif_addr_unlock_bh(dev);
1210 
1211         err = vlan_vids_add_by_dev(port_dev, dev);
1212         if (err) {
1213                 netdev_err(dev, "Failed to add vlan ids to device %s\n",
1214                                 portname);
1215                 goto err_vids_add;
1216         }
1217 
1218         err = team_port_enable_netpoll(team, port);
1219         if (err) {
1220                 netdev_err(dev, "Failed to enable netpoll on device %s\n",
1221                            portname);
1222                 goto err_enable_netpoll;
1223         }
1224 
1225         if (!(dev->features & NETIF_F_LRO))
1226                 dev_disable_lro(port_dev);
1227 
1228         err = netdev_rx_handler_register(port_dev, team_handle_frame,
1229                                          port);
1230         if (err) {
1231                 netdev_err(dev, "Device %s failed to register rx_handler\n",
1232                            portname);
1233                 goto err_handler_register;
1234         }
1235 
1236         err = team_upper_dev_link(team, port);
1237         if (err) {
1238                 netdev_err(dev, "Device %s failed to set upper link\n",
1239                            portname);
1240                 goto err_set_upper_link;
1241         }
1242 
1243         err = __team_option_inst_add_port(team, port);
1244         if (err) {
1245                 netdev_err(dev, "Device %s failed to add per-port options\n",
1246                            portname);
1247                 goto err_option_port_add;
1248         }
1249 
1250         port->index = -1;
1251         list_add_tail_rcu(&port->list, &team->port_list);
1252         team_port_enable(team, port);
1253         __team_compute_features(team);
1254         __team_port_change_port_added(port, !!netif_carrier_ok(port_dev));
1255         __team_options_change_check(team);
1256 
1257         netdev_info(dev, "Port device %s added\n", portname);
1258 
1259         return 0;
1260 
1261 err_option_port_add:
1262         team_upper_dev_unlink(team, port);
1263 
1264 err_set_upper_link:
1265         netdev_rx_handler_unregister(port_dev);
1266 
1267 err_handler_register:
1268         team_port_disable_netpoll(port);
1269 
1270 err_enable_netpoll:
1271         vlan_vids_del_by_dev(port_dev, dev);
1272 
1273 err_vids_add:
1274         dev_uc_unsync(port_dev, dev);
1275         dev_mc_unsync(port_dev, dev);
1276         dev_close(port_dev);
1277 
1278 err_dev_open:
1279         team_port_leave(team, port);
1280         team_port_set_orig_dev_addr(port);
1281 
1282 err_port_enter:
1283         dev_set_mtu(port_dev, port->orig.mtu);
1284 
1285 err_set_mtu:
1286         kfree(port);
1287 
1288         return err;
1289 }
1290 
1291 static void __team_port_change_port_removed(struct team_port *port);
1292 
1293 static int team_port_del(struct team *team, struct net_device *port_dev)
1294 {
1295         struct net_device *dev = team->dev;
1296         struct team_port *port;
1297         char *portname = port_dev->name;
1298 
1299         port = team_port_get_rtnl(port_dev);
1300         if (!port || !team_port_find(team, port)) {
1301                 netdev_err(dev, "Device %s does not act as a port of this team\n",
1302                            portname);
1303                 return -ENOENT;
1304         }
1305 
1306         team_port_disable(team, port);
1307         list_del_rcu(&port->list);
1308         team_upper_dev_unlink(team, port);
1309         netdev_rx_handler_unregister(port_dev);
1310         team_port_disable_netpoll(port);
1311         vlan_vids_del_by_dev(port_dev, dev);
1312         dev_uc_unsync(port_dev, dev);
1313         dev_mc_unsync(port_dev, dev);
1314         dev_close(port_dev);
1315         team_port_leave(team, port);
1316 
1317         __team_option_inst_mark_removed_port(team, port);
1318         __team_options_change_check(team);
1319         __team_option_inst_del_port(team, port);
1320         __team_port_change_port_removed(port);
1321 
1322         team_port_set_orig_dev_addr(port);
1323         dev_set_mtu(port_dev, port->orig.mtu);
1324         kfree_rcu(port, rcu);
1325         netdev_info(dev, "Port device %s removed\n", portname);
1326         __team_compute_features(team);
1327 
1328         return 0;
1329 }
1330 
1331 
1332 /*****************
1333  * Net device ops
1334  *****************/
1335 
1336 static int team_mode_option_get(struct team *team, struct team_gsetter_ctx *ctx)
1337 {
1338         ctx->data.str_val = team->mode->kind;
1339         return 0;
1340 }
1341 
1342 static int team_mode_option_set(struct team *team, struct team_gsetter_ctx *ctx)
1343 {
1344         return team_change_mode(team, ctx->data.str_val);
1345 }
1346 
1347 static int team_notify_peers_count_get(struct team *team,
1348                                        struct team_gsetter_ctx *ctx)
1349 {
1350         ctx->data.u32_val = team->notify_peers.count;
1351         return 0;
1352 }
1353 
1354 static int team_notify_peers_count_set(struct team *team,
1355                                        struct team_gsetter_ctx *ctx)
1356 {
1357         team->notify_peers.count = ctx->data.u32_val;
1358         return 0;
1359 }
1360 
1361 static int team_notify_peers_interval_get(struct team *team,
1362                                           struct team_gsetter_ctx *ctx)
1363 {
1364         ctx->data.u32_val = team->notify_peers.interval;
1365         return 0;
1366 }
1367 
1368 static int team_notify_peers_interval_set(struct team *team,
1369                                           struct team_gsetter_ctx *ctx)
1370 {
1371         team->notify_peers.interval = ctx->data.u32_val;
1372         return 0;
1373 }
1374 
1375 static int team_mcast_rejoin_count_get(struct team *team,
1376                                        struct team_gsetter_ctx *ctx)
1377 {
1378         ctx->data.u32_val = team->mcast_rejoin.count;
1379         return 0;
1380 }
1381 
1382 static int team_mcast_rejoin_count_set(struct team *team,
1383                                        struct team_gsetter_ctx *ctx)
1384 {
1385         team->mcast_rejoin.count = ctx->data.u32_val;
1386         return 0;
1387 }
1388 
1389 static int team_mcast_rejoin_interval_get(struct team *team,
1390                                           struct team_gsetter_ctx *ctx)
1391 {
1392         ctx->data.u32_val = team->mcast_rejoin.interval;
1393         return 0;
1394 }
1395 
1396 static int team_mcast_rejoin_interval_set(struct team *team,
1397                                           struct team_gsetter_ctx *ctx)
1398 {
1399         team->mcast_rejoin.interval = ctx->data.u32_val;
1400         return 0;
1401 }
1402 
1403 static int team_port_en_option_get(struct team *team,
1404                                    struct team_gsetter_ctx *ctx)
1405 {
1406         struct team_port *port = ctx->info->port;
1407 
1408         ctx->data.bool_val = team_port_enabled(port);
1409         return 0;
1410 }
1411 
1412 static int team_port_en_option_set(struct team *team,
1413                                    struct team_gsetter_ctx *ctx)
1414 {
1415         struct team_port *port = ctx->info->port;
1416 
1417         if (ctx->data.bool_val)
1418                 team_port_enable(team, port);
1419         else
1420                 team_port_disable(team, port);
1421         return 0;
1422 }
1423 
1424 static int team_user_linkup_option_get(struct team *team,
1425                                        struct team_gsetter_ctx *ctx)
1426 {
1427         struct team_port *port = ctx->info->port;
1428 
1429         ctx->data.bool_val = port->user.linkup;
1430         return 0;
1431 }
1432 
1433 static void __team_carrier_check(struct team *team);
1434 
1435 static int team_user_linkup_option_set(struct team *team,
1436                                        struct team_gsetter_ctx *ctx)
1437 {
1438         struct team_port *port = ctx->info->port;
1439 
1440         port->user.linkup = ctx->data.bool_val;
1441         team_refresh_port_linkup(port);
1442         __team_carrier_check(port->team);
1443         return 0;
1444 }
1445 
1446 static int team_user_linkup_en_option_get(struct team *team,
1447                                           struct team_gsetter_ctx *ctx)
1448 {
1449         struct team_port *port = ctx->info->port;
1450 
1451         ctx->data.bool_val = port->user.linkup_enabled;
1452         return 0;
1453 }
1454 
1455 static int team_user_linkup_en_option_set(struct team *team,
1456                                           struct team_gsetter_ctx *ctx)
1457 {
1458         struct team_port *port = ctx->info->port;
1459 
1460         port->user.linkup_enabled = ctx->data.bool_val;
1461         team_refresh_port_linkup(port);
1462         __team_carrier_check(port->team);
1463         return 0;
1464 }
1465 
1466 static int team_priority_option_get(struct team *team,
1467                                     struct team_gsetter_ctx *ctx)
1468 {
1469         struct team_port *port = ctx->info->port;
1470 
1471         ctx->data.s32_val = port->priority;
1472         return 0;
1473 }
1474 
1475 static int team_priority_option_set(struct team *team,
1476                                     struct team_gsetter_ctx *ctx)
1477 {
1478         struct team_port *port = ctx->info->port;
1479         s32 priority = ctx->data.s32_val;
1480 
1481         if (port->priority == priority)
1482                 return 0;
1483         port->priority = priority;
1484         team_queue_override_port_prio_changed(team, port);
1485         return 0;
1486 }
1487 
1488 static int team_queue_id_option_get(struct team *team,
1489                                     struct team_gsetter_ctx *ctx)
1490 {
1491         struct team_port *port = ctx->info->port;
1492 
1493         ctx->data.u32_val = port->queue_id;
1494         return 0;
1495 }
1496 
1497 static int team_queue_id_option_set(struct team *team,
1498                                     struct team_gsetter_ctx *ctx)
1499 {
1500         struct team_port *port = ctx->info->port;
1501         u16 new_queue_id = ctx->data.u32_val;
1502 
1503         if (port->queue_id == new_queue_id)
1504                 return 0;
1505         if (new_queue_id >= team->dev->real_num_tx_queues)
1506                 return -EINVAL;
1507         team_queue_override_port_change_queue_id(team, port, new_queue_id);
1508         return 0;
1509 }
1510 
1511 static const struct team_option team_options[] = {
1512         {
1513                 .name = "mode",
1514                 .type = TEAM_OPTION_TYPE_STRING,
1515                 .getter = team_mode_option_get,
1516                 .setter = team_mode_option_set,
1517         },
1518         {
1519                 .name = "notify_peers_count",
1520                 .type = TEAM_OPTION_TYPE_U32,
1521                 .getter = team_notify_peers_count_get,
1522                 .setter = team_notify_peers_count_set,
1523         },
1524         {
1525                 .name = "notify_peers_interval",
1526                 .type = TEAM_OPTION_TYPE_U32,
1527                 .getter = team_notify_peers_interval_get,
1528                 .setter = team_notify_peers_interval_set,
1529         },
1530         {
1531                 .name = "mcast_rejoin_count",
1532                 .type = TEAM_OPTION_TYPE_U32,
1533                 .getter = team_mcast_rejoin_count_get,
1534                 .setter = team_mcast_rejoin_count_set,
1535         },
1536         {
1537                 .name = "mcast_rejoin_interval",
1538                 .type = TEAM_OPTION_TYPE_U32,
1539                 .getter = team_mcast_rejoin_interval_get,
1540                 .setter = team_mcast_rejoin_interval_set,
1541         },
1542         {
1543                 .name = "enabled",
1544                 .type = TEAM_OPTION_TYPE_BOOL,
1545                 .per_port = true,
1546                 .getter = team_port_en_option_get,
1547                 .setter = team_port_en_option_set,
1548         },
1549         {
1550                 .name = "user_linkup",
1551                 .type = TEAM_OPTION_TYPE_BOOL,
1552                 .per_port = true,
1553                 .getter = team_user_linkup_option_get,
1554                 .setter = team_user_linkup_option_set,
1555         },
1556         {
1557                 .name = "user_linkup_enabled",
1558                 .type = TEAM_OPTION_TYPE_BOOL,
1559                 .per_port = true,
1560                 .getter = team_user_linkup_en_option_get,
1561                 .setter = team_user_linkup_en_option_set,
1562         },
1563         {
1564                 .name = "priority",
1565                 .type = TEAM_OPTION_TYPE_S32,
1566                 .per_port = true,
1567                 .getter = team_priority_option_get,
1568                 .setter = team_priority_option_set,
1569         },
1570         {
1571                 .name = "queue_id",
1572                 .type = TEAM_OPTION_TYPE_U32,
1573                 .per_port = true,
1574                 .getter = team_queue_id_option_get,
1575                 .setter = team_queue_id_option_set,
1576         },
1577 };
1578 
1579 
1580 static int team_init(struct net_device *dev)
1581 {
1582         struct team *team = netdev_priv(dev);
1583         int i;
1584         int err;
1585 
1586         team->dev = dev;
1587         mutex_init(&team->lock);
1588         team_set_no_mode(team);
1589 
1590         team->pcpu_stats = netdev_alloc_pcpu_stats(struct team_pcpu_stats);
1591         if (!team->pcpu_stats)
1592                 return -ENOMEM;
1593 
1594         for (i = 0; i < TEAM_PORT_HASHENTRIES; i++)
1595                 INIT_HLIST_HEAD(&team->en_port_hlist[i]);
1596         INIT_LIST_HEAD(&team->port_list);
1597         err = team_queue_override_init(team);
1598         if (err)
1599                 goto err_team_queue_override_init;
1600 
1601         team_adjust_ops(team);
1602 
1603         INIT_LIST_HEAD(&team->option_list);
1604         INIT_LIST_HEAD(&team->option_inst_list);
1605 
1606         team_notify_peers_init(team);
1607         team_mcast_rejoin_init(team);
1608 
1609         err = team_options_register(team, team_options, ARRAY_SIZE(team_options));
1610         if (err)
1611                 goto err_options_register;
1612         netif_carrier_off(dev);
1613 
1614         netdev_lockdep_set_classes(dev);
1615 
1616         return 0;
1617 
1618 err_options_register:
1619         team_mcast_rejoin_fini(team);
1620         team_notify_peers_fini(team);
1621         team_queue_override_fini(team);
1622 err_team_queue_override_init:
1623         free_percpu(team->pcpu_stats);
1624 
1625         return err;
1626 }
1627 
1628 static void team_uninit(struct net_device *dev)
1629 {
1630         struct team *team = netdev_priv(dev);
1631         struct team_port *port;
1632         struct team_port *tmp;
1633 
1634         mutex_lock(&team->lock);
1635         list_for_each_entry_safe(port, tmp, &team->port_list, list)
1636                 team_port_del(team, port->dev);
1637 
1638         __team_change_mode(team, NULL); /* cleanup */
1639         __team_options_unregister(team, team_options, ARRAY_SIZE(team_options));
1640         team_mcast_rejoin_fini(team);
1641         team_notify_peers_fini(team);
1642         team_queue_override_fini(team);
1643         mutex_unlock(&team->lock);
1644 }
1645 
1646 static void team_destructor(struct net_device *dev)
1647 {
1648         struct team *team = netdev_priv(dev);
1649 
1650         free_percpu(team->pcpu_stats);
1651         free_netdev(dev);
1652 }
1653 
1654 static int team_open(struct net_device *dev)
1655 {
1656         return 0;
1657 }
1658 
1659 static int team_close(struct net_device *dev)
1660 {
1661         return 0;
1662 }
1663 
1664 /*
1665  * note: already called with rcu_read_lock
1666  */
1667 static netdev_tx_t team_xmit(struct sk_buff *skb, struct net_device *dev)
1668 {
1669         struct team *team = netdev_priv(dev);
1670         bool tx_success;
1671         unsigned int len = skb->len;
1672 
1673         tx_success = team_queue_override_transmit(team, skb);
1674         if (!tx_success)
1675                 tx_success = team->ops.transmit(team, skb);
1676         if (tx_success) {
1677                 struct team_pcpu_stats *pcpu_stats;
1678 
1679                 pcpu_stats = this_cpu_ptr(team->pcpu_stats);
1680                 u64_stats_update_begin(&pcpu_stats->syncp);
1681                 pcpu_stats->tx_packets++;
1682                 pcpu_stats->tx_bytes += len;
1683                 u64_stats_update_end(&pcpu_stats->syncp);
1684         } else {
1685                 this_cpu_inc(team->pcpu_stats->tx_dropped);
1686         }
1687 
1688         return NETDEV_TX_OK;
1689 }
1690 
1691 static u16 team_select_queue(struct net_device *dev, struct sk_buff *skb,
1692                              void *accel_priv, select_queue_fallback_t fallback)
1693 {
1694         /*
1695          * This helper function exists to help dev_pick_tx get the correct
1696          * destination queue.  Using a helper function skips a call to
1697          * skb_tx_hash and will put the skbs in the queue we expect on their
1698          * way down to the team driver.
1699          */
1700         u16 txq = skb_rx_queue_recorded(skb) ? skb_get_rx_queue(skb) : 0;
1701 
1702         /*
1703          * Save the original txq to restore before passing to the driver
1704          */
1705         qdisc_skb_cb(skb)->slave_dev_queue_mapping = skb->queue_mapping;
1706 
1707         if (unlikely(txq >= dev->real_num_tx_queues)) {
1708                 do {
1709                         txq -= dev->real_num_tx_queues;
1710                 } while (txq >= dev->real_num_tx_queues);
1711         }
1712         return txq;
1713 }
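/*
 * A worked example of the wrap-around above (illustrative only, hypothetical
 * numbers): with dev->real_num_tx_queues == 4 and a recorded rx queue of 9,
 * the loop subtracts 4 twice and the skb goes out on tx queue 1.  The
 * subtraction loop is simply a modulo reduction, as this hypothetical helper
 * shows:
 */
#if 0
static u16 team_wrap_txq(u16 txq, unsigned int real_num_tx_queues)
{
        /* same result as the while loop in team_select_queue() above */
        return txq % real_num_tx_queues;
}
#endif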
1714 
1715 static void team_change_rx_flags(struct net_device *dev, int change)
1716 {
1717         struct team *team = netdev_priv(dev);
1718         struct team_port *port;
1719         int inc;
1720 
1721         rcu_read_lock();
1722         list_for_each_entry_rcu(port, &team->port_list, list) {
1723                 if (change & IFF_PROMISC) {
1724                         inc = dev->flags & IFF_PROMISC ? 1 : -1;
1725                         dev_set_promiscuity(port->dev, inc);
1726                 }
1727                 if (change & IFF_ALLMULTI) {
1728                         inc = dev->flags & IFF_ALLMULTI ? 1 : -1;
1729                         dev_set_allmulti(port->dev, inc);
1730                 }
1731         }
1732         rcu_read_unlock();
1733 }
1734 
1735 static void team_set_rx_mode(struct net_device *dev)
1736 {
1737         struct team *team = netdev_priv(dev);
1738         struct team_port *port;
1739 
1740         rcu_read_lock();
1741         list_for_each_entry_rcu(port, &team->port_list, list) {
1742                 dev_uc_sync_multiple(port->dev, dev);
1743                 dev_mc_sync_multiple(port->dev, dev);
1744         }
1745         rcu_read_unlock();
1746 }
1747 
1748 static int team_set_mac_address(struct net_device *dev, void *p)
1749 {
1750         struct sockaddr *addr = p;
1751         struct team *team = netdev_priv(dev);
1752         struct team_port *port;
1753 
1754         if (dev->type == ARPHRD_ETHER && !is_valid_ether_addr(addr->sa_data))
1755                 return -EADDRNOTAVAIL;
1756         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1757         mutex_lock(&team->lock);
1758         list_for_each_entry(port, &team->port_list, list)
1759                 if (team->ops.port_change_dev_addr)
1760                         team->ops.port_change_dev_addr(team, port);
1761         mutex_unlock(&team->lock);
1762         return 0;
1763 }
1764 
1765 static int team_change_mtu(struct net_device *dev, int new_mtu)
1766 {
1767         struct team *team = netdev_priv(dev);
1768         struct team_port *port;
1769         int err;
1770 
1771         /*
1772          * Although this is a reader, it's guarded by the team lock. It's not
1773          * possible to traverse the list in reverse under rcu_read_lock.
1774          */
1775         mutex_lock(&team->lock);
1776         team->port_mtu_change_allowed = true;
1777         list_for_each_entry(port, &team->port_list, list) {
1778                 err = dev_set_mtu(port->dev, new_mtu);
1779                 if (err) {
1780                 netdev_err(dev, "Device %s failed to change mtu\n",
1781                                    port->dev->name);
1782                         goto unwind;
1783                 }
1784         }
1785         team->port_mtu_change_allowed = false;
1786         mutex_unlock(&team->lock);
1787 
1788         dev->mtu = new_mtu;
1789 
1790         return 0;
1791 
1792 unwind:
1793         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1794                 dev_set_mtu(port->dev, dev->mtu);
1795         team->port_mtu_change_allowed = false;
1796         mutex_unlock(&team->lock);
1797 
1798         return err;
1799 }
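/*
 * The unwind path above uses list_for_each_entry_continue_reverse(), which
 * resumes from the element just before the port that failed and walks back
 * towards the list head, so only ports whose MTU was already changed get
 * restored.  A minimal sketch of the same "apply to all, roll back on
 * failure" idiom, with hypothetical callbacks:
 */
#if 0
static int team_apply_to_ports(struct team *team,
                               int (*apply)(struct team_port *port),
                               void (*undo)(struct team_port *port))
{
        struct team_port *port;
        int err;

        list_for_each_entry(port, &team->port_list, list) {
                err = apply(port);
                if (err)
                        goto unwind;
        }
        return 0;

unwind:
        /* roll back every port handled before the one that failed */
        list_for_each_entry_continue_reverse(port, &team->port_list, list)
                undo(port);
        return err;
}
#endif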
1800 
1801 static struct rtnl_link_stats64 *
1802 team_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
1803 {
1804         struct team *team = netdev_priv(dev);
1805         struct team_pcpu_stats *p;
1806         u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes;
1807         u32 rx_dropped = 0, tx_dropped = 0, rx_nohandler = 0;
1808         unsigned int start;
1809         int i;
1810 
1811         for_each_possible_cpu(i) {
1812                 p = per_cpu_ptr(team->pcpu_stats, i);
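                /*
                 * On 32-bit SMP a 64-bit counter cannot be read atomically;
                 * the syncp seqcount makes this reader retry the copy if a
                 * writer (such as team_xmit() above) updated the counters in
                 * the meantime.  On 64-bit builds the begin/retry pair is
                 * effectively a no-op.
                 */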
1813                 do {
1814                         start = u64_stats_fetch_begin_irq(&p->syncp);
1815                         rx_packets      = p->rx_packets;
1816                         rx_bytes        = p->rx_bytes;
1817                         rx_multicast    = p->rx_multicast;
1818                         tx_packets      = p->tx_packets;
1819                         tx_bytes        = p->tx_bytes;
1820                 } while (u64_stats_fetch_retry_irq(&p->syncp, start));
1821 
1822                 stats->rx_packets       += rx_packets;
1823                 stats->rx_bytes         += rx_bytes;
1824                 stats->multicast        += rx_multicast;
1825                 stats->tx_packets       += tx_packets;
1826                 stats->tx_bytes         += tx_bytes;
1827                 /*
1828                  * rx_dropped, tx_dropped & rx_nohandler are u32,
1829                  * updated without syncp protection.
1830                  */
1831                 rx_dropped      += p->rx_dropped;
1832                 tx_dropped      += p->tx_dropped;
1833                 rx_nohandler    += p->rx_nohandler;
1834         }
1835         stats->rx_dropped       = rx_dropped;
1836         stats->tx_dropped       = tx_dropped;
1837         stats->rx_nohandler     = rx_nohandler;
1838         return stats;
1839 }
1840 
1841 static int team_vlan_rx_add_vid(struct net_device *dev, __be16 proto, u16 vid)
1842 {
1843         struct team *team = netdev_priv(dev);
1844         struct team_port *port;
1845         int err;
1846 
1847         /*
1848          * Although this is a reader, it's guarded by the team lock. It's not
1849          * possible to traverse the list in reverse under rcu_read_lock.
1850          */
1851         mutex_lock(&team->lock);
1852         list_for_each_entry(port, &team->port_list, list) {
1853                 err = vlan_vid_add(port->dev, proto, vid);
1854                 if (err)
1855                         goto unwind;
1856         }
1857         mutex_unlock(&team->lock);
1858 
1859         return 0;
1860 
1861 unwind:
1862         list_for_each_entry_continue_reverse(port, &team->port_list, list)
1863                 vlan_vid_del(port->dev, proto, vid);
1864         mutex_unlock(&team->lock);
1865 
1866         return err;
1867 }
1868 
1869 static int team_vlan_rx_kill_vid(struct net_device *dev, __be16 proto, u16 vid)
1870 {
1871         struct team *team = netdev_priv(dev);
1872         struct team_port *port;
1873 
1874         mutex_lock(&team->lock);
1875         list_for_each_entry(port, &team->port_list, list)
1876                 vlan_vid_del(port->dev, proto, vid);
1877         mutex_unlock(&team->lock);
1878 
1879         return 0;
1880 }
1881 
1882 #ifdef CONFIG_NET_POLL_CONTROLLER
1883 static void team_poll_controller(struct net_device *dev)
1884 {
1885 }
1886 
1887 static void __team_netpoll_cleanup(struct team *team)
1888 {
1889         struct team_port *port;
1890 
1891         list_for_each_entry(port, &team->port_list, list)
1892                 team_port_disable_netpoll(port);
1893 }
1894 
1895 static void team_netpoll_cleanup(struct net_device *dev)
1896 {
1897         struct team *team = netdev_priv(dev);
1898 
1899         mutex_lock(&team->lock);
1900         __team_netpoll_cleanup(team);
1901         mutex_unlock(&team->lock);
1902 }
1903 
1904 static int team_netpoll_setup(struct net_device *dev,
1905                               struct netpoll_info *npinfo)
1906 {
1907         struct team *team = netdev_priv(dev);
1908         struct team_port *port;
1909         int err = 0;
1910 
1911         mutex_lock(&team->lock);
1912         list_for_each_entry(port, &team->port_list, list) {
1913                 err = team_port_enable_netpoll(team, port);
1914                 if (err) {
1915                         __team_netpoll_cleanup(team);
1916                         break;
1917                 }
1918         }
1919         mutex_unlock(&team->lock);
1920         return err;
1921 }
1922 #endif
1923 
1924 static int team_add_slave(struct net_device *dev, struct net_device *port_dev)
1925 {
1926         struct team *team = netdev_priv(dev);
1927         int err;
1928 
1929         mutex_lock(&team->lock);
1930         err = team_port_add(team, port_dev);
1931         mutex_unlock(&team->lock);
1932         return err;
1933 }
1934 
1935 static int team_del_slave(struct net_device *dev, struct net_device *port_dev)
1936 {
1937         struct team *team = netdev_priv(dev);
1938         int err;
1939 
1940         mutex_lock(&team->lock);
1941         err = team_port_del(team, port_dev);
1942         mutex_unlock(&team->lock);
1943         return err;
1944 }
1945 
1946 static netdev_features_t team_fix_features(struct net_device *dev,
1947                                            netdev_features_t features)
1948 {
1949         struct team_port *port;
1950         struct team *team = netdev_priv(dev);
1951         netdev_features_t mask;
1952 
1953         mask = features;
1954         features &= ~NETIF_F_ONE_FOR_ALL;
1955         features |= NETIF_F_ALL_FOR_ALL;
1956 
1957         rcu_read_lock();
1958         list_for_each_entry_rcu(port, &team->port_list, list) {
1959                 features = netdev_increment_features(features,
1960                                                      port->dev->features,
1961                                                      mask);
1962         }
1963         rcu_read_unlock();
1964 
1965         features = netdev_add_tso_features(features, mask);
1966 
1967         return features;
1968 }
1969 
1970 static int team_change_carrier(struct net_device *dev, bool new_carrier)
1971 {
1972         struct team *team = netdev_priv(dev);
1973 
1974         team->user_carrier_enabled = true;
1975 
1976         if (new_carrier)
1977                 netif_carrier_on(dev);
1978         else
1979                 netif_carrier_off(dev);
1980         return 0;
1981 }
1982 
1983 static const struct net_device_ops team_netdev_ops = {
1984         .ndo_init               = team_init,
1985         .ndo_uninit             = team_uninit,
1986         .ndo_open               = team_open,
1987         .ndo_stop               = team_close,
1988         .ndo_start_xmit         = team_xmit,
1989         .ndo_select_queue       = team_select_queue,
1990         .ndo_change_rx_flags    = team_change_rx_flags,
1991         .ndo_set_rx_mode        = team_set_rx_mode,
1992         .ndo_set_mac_address    = team_set_mac_address,
1993         .ndo_change_mtu         = team_change_mtu,
1994         .ndo_get_stats64        = team_get_stats64,
1995         .ndo_vlan_rx_add_vid    = team_vlan_rx_add_vid,
1996         .ndo_vlan_rx_kill_vid   = team_vlan_rx_kill_vid,
1997 #ifdef CONFIG_NET_POLL_CONTROLLER
1998         .ndo_poll_controller    = team_poll_controller,
1999         .ndo_netpoll_setup      = team_netpoll_setup,
2000         .ndo_netpoll_cleanup    = team_netpoll_cleanup,
2001 #endif
2002         .ndo_add_slave          = team_add_slave,
2003         .ndo_del_slave          = team_del_slave,
2004         .ndo_fix_features       = team_fix_features,
2005         .ndo_neigh_construct    = netdev_default_l2upper_neigh_construct,
2006         .ndo_neigh_destroy      = netdev_default_l2upper_neigh_destroy,
2007         .ndo_change_carrier     = team_change_carrier,
2008         .ndo_bridge_setlink     = switchdev_port_bridge_setlink,
2009         .ndo_bridge_getlink     = switchdev_port_bridge_getlink,
2010         .ndo_bridge_dellink     = switchdev_port_bridge_dellink,
2011         .ndo_fdb_add            = switchdev_port_fdb_add,
2012         .ndo_fdb_del            = switchdev_port_fdb_del,
2013         .ndo_fdb_dump           = switchdev_port_fdb_dump,
2014         .ndo_features_check     = passthru_features_check,
2015 };
2016 
2017 /***********************
2018  * ethtool interface
2019  ***********************/
2020 
2021 static void team_ethtool_get_drvinfo(struct net_device *dev,
2022                                      struct ethtool_drvinfo *drvinfo)
2023 {
2024         strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
2025         strlcpy(drvinfo->version, UTS_RELEASE, sizeof(drvinfo->version));
2026 }
2027 
2028 static const struct ethtool_ops team_ethtool_ops = {
2029         .get_drvinfo            = team_ethtool_get_drvinfo,
2030         .get_link               = ethtool_op_get_link,
2031 };
2032 
2033 /***********************
2034  * rt netlink interface
2035  ***********************/
2036 
2037 static void team_setup_by_port(struct net_device *dev,
2038                                struct net_device *port_dev)
2039 {
2040         dev->header_ops = port_dev->header_ops;
2041         dev->type = port_dev->type;
2042         dev->hard_header_len = port_dev->hard_header_len;
2043         dev->addr_len = port_dev->addr_len;
2044         dev->mtu = port_dev->mtu;
2045         memcpy(dev->broadcast, port_dev->broadcast, port_dev->addr_len);
2046         eth_hw_addr_inherit(dev, port_dev);
2047 }
2048 
2049 static int team_dev_type_check_change(struct net_device *dev,
2050                                       struct net_device *port_dev)
2051 {
2052         struct team *team = netdev_priv(dev);
2053         char *portname = port_dev->name;
2054         int err;
2055 
2056         if (dev->type == port_dev->type)
2057                 return 0;
2058         if (!list_empty(&team->port_list)) {
2059                 netdev_err(dev, "Device %s is of different type\n", portname);
2060                 return -EBUSY;
2061         }
2062         err = call_netdevice_notifiers(NETDEV_PRE_TYPE_CHANGE, dev);
2063         err = notifier_to_errno(err);
2064         if (err) {
2065                 netdev_err(dev, "Refused to change device type\n");
2066                 return err;
2067         }
2068         dev_uc_flush(dev);
2069         dev_mc_flush(dev);
2070         team_setup_by_port(dev, port_dev);
2071         call_netdevice_notifiers(NETDEV_POST_TYPE_CHANGE, dev);
2072         return 0;
2073 }
2074 
2075 static void team_setup(struct net_device *dev)
2076 {
2077         ether_setup(dev);
2078 
2079         dev->netdev_ops = &team_netdev_ops;
2080         dev->ethtool_ops = &team_ethtool_ops;
2081         dev->destructor = team_destructor;
2082         dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING);
2083         dev->priv_flags |= IFF_NO_QUEUE;
2084         dev->priv_flags |= IFF_TEAM;
2085 
2086         /*
2087          * Indicate we support unicast address filtering. That way the core
2088          * won't switch the device to promiscuous mode when a unicast address
2089          * is added; that decision is left to the underlying port drivers.
2090          */
2091         dev->priv_flags |= IFF_UNICAST_FLT | IFF_LIVE_ADDR_CHANGE;
2092 
2093         dev->features |= NETIF_F_LLTX;
2094         dev->features |= NETIF_F_GRO;
2095 
2096         /* Don't allow team devices to change network namespaces. */
2097         dev->features |= NETIF_F_NETNS_LOCAL;
2098 
2099         dev->hw_features = TEAM_VLAN_FEATURES |
2100                            NETIF_F_HW_VLAN_CTAG_TX |
2101                            NETIF_F_HW_VLAN_CTAG_RX |
2102                            NETIF_F_HW_VLAN_CTAG_FILTER;
2103 
2104         dev->hw_features |= NETIF_F_GSO_ENCAP_ALL;
2105         dev->features |= dev->hw_features;
2106 }
2107 
2108 static int team_newlink(struct net *src_net, struct net_device *dev,
2109                         struct nlattr *tb[], struct nlattr *data[])
2110 {
2111         if (tb[IFLA_ADDRESS] == NULL)
2112                 eth_hw_addr_random(dev);
2113 
2114         return register_netdevice(dev);
2115 }
2116 
2117 static int team_validate(struct nlattr *tb[], struct nlattr *data[])
2118 {
2119         if (tb[IFLA_ADDRESS]) {
2120                 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
2121                         return -EINVAL;
2122                 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
2123                         return -EADDRNOTAVAIL;
2124         }
2125         return 0;
2126 }
2127 
2128 static unsigned int team_get_num_tx_queues(void)
2129 {
2130         return TEAM_DEFAULT_NUM_TX_QUEUES;
2131 }
2132 
2133 static unsigned int team_get_num_rx_queues(void)
2134 {
2135         return TEAM_DEFAULT_NUM_RX_QUEUES;
2136 }
2137 
2138 static struct rtnl_link_ops team_link_ops __read_mostly = {
2139         .kind                   = DRV_NAME,
2140         .priv_size              = sizeof(struct team),
2141         .setup                  = team_setup,
2142         .newlink                = team_newlink,
2143         .validate               = team_validate,
2144         .get_num_tx_queues      = team_get_num_tx_queues,
2145         .get_num_rx_queues      = team_get_num_rx_queues,
2146 };
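/*
 * .kind is what rtnetlink matches against the "type" argument of link
 * creation requests (for example "ip link add dev team0 type team");
 * team_validate() and then team_newlink() above are invoked by the
 * rtnetlink core while the device is being created.
 */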
2147 
2148 
2149 /***********************************
2150  * Generic netlink custom interface
2151  ***********************************/
2152 
2153 static struct genl_family team_nl_family = {
2154         .id             = GENL_ID_GENERATE,
2155         .name           = TEAM_GENL_NAME,
2156         .version        = TEAM_GENL_VERSION,
2157         .maxattr        = TEAM_ATTR_MAX,
2158         .netnsok        = true,
2159 };
2160 
2161 static const struct nla_policy team_nl_policy[TEAM_ATTR_MAX + 1] = {
2162         [TEAM_ATTR_UNSPEC]                      = { .type = NLA_UNSPEC, },
2163         [TEAM_ATTR_TEAM_IFINDEX]                = { .type = NLA_U32 },
2164         [TEAM_ATTR_LIST_OPTION]                 = { .type = NLA_NESTED },
2165         [TEAM_ATTR_LIST_PORT]                   = { .type = NLA_NESTED },
2166 };
2167 
2168 static const struct nla_policy
2169 team_nl_option_policy[TEAM_ATTR_OPTION_MAX + 1] = {
2170         [TEAM_ATTR_OPTION_UNSPEC]               = { .type = NLA_UNSPEC, },
2171         [TEAM_ATTR_OPTION_NAME] = {
2172                 .type = NLA_STRING,
2173                 .len = TEAM_STRING_MAX_LEN,
2174         },
2175         [TEAM_ATTR_OPTION_CHANGED]              = { .type = NLA_FLAG },
2176         [TEAM_ATTR_OPTION_TYPE]                 = { .type = NLA_U8 },
2177         [TEAM_ATTR_OPTION_DATA]                 = { .type = NLA_BINARY },
2178 };
2179 
2180 static int team_nl_cmd_noop(struct sk_buff *skb, struct genl_info *info)
2181 {
2182         struct sk_buff *msg;
2183         void *hdr;
2184         int err;
2185 
2186         msg = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
2187         if (!msg)
2188                 return -ENOMEM;
2189 
2190         hdr = genlmsg_put(msg, info->snd_portid, info->snd_seq,
2191                           &team_nl_family, 0, TEAM_CMD_NOOP);
2192         if (!hdr) {
2193                 err = -EMSGSIZE;
2194                 goto err_msg_put;
2195         }
2196 
2197         genlmsg_end(msg, hdr);
2198 
2199         return genlmsg_unicast(genl_info_net(info), msg, info->snd_portid);
2200 
2201 err_msg_put:
2202         nlmsg_free(msg);
2203 
2204         return err;
2205 }
2206 
2207 /*
2208  * Netlink cmd functions should be locked using the following two functions.
2209  * Since dev is held here, it is guaranteed not to disappear in between.
2210  */
2211 static struct team *team_nl_team_get(struct genl_info *info)
2212 {
2213         struct net *net = genl_info_net(info);
2214         int ifindex;
2215         struct net_device *dev;
2216         struct team *team;
2217 
2218         if (!info->attrs[TEAM_ATTR_TEAM_IFINDEX])
2219                 return NULL;
2220 
2221         ifindex = nla_get_u32(info->attrs[TEAM_ATTR_TEAM_IFINDEX]);
2222         dev = dev_get_by_index(net, ifindex);
2223         if (!dev || dev->netdev_ops != &team_netdev_ops) {
2224                 if (dev)
2225                         dev_put(dev);
2226                 return NULL;
2227         }
2228 
2229         team = netdev_priv(dev);
2230         mutex_lock(&team->lock);
2231         return team;
2232 }
2233 
2234 static void team_nl_team_put(struct team *team)
2235 {
2236         mutex_unlock(&team->lock);
2237         dev_put(team->dev);
2238 }
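/*
 * A minimal sketch of the expected get/put pairing in a command handler
 * (a hypothetical handler, mirroring team_nl_cmd_options_get() below):
 */
#if 0
static int team_nl_cmd_example(struct sk_buff *skb, struct genl_info *info)
{
        struct team *team;

        team = team_nl_team_get(info); /* takes dev reference + team->lock */
        if (!team)
                return -EINVAL;

        /* ... inspect or modify team state here ... */

        team_nl_team_put(team);        /* drops team->lock + dev reference */
        return 0;
}
#endif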
2239 
2240 typedef int team_nl_send_func_t(struct sk_buff *skb,
2241                                 struct team *team, u32 portid);
2242 
2243 static int team_nl_send_unicast(struct sk_buff *skb, struct team *team, u32 portid)
2244 {
2245         return genlmsg_unicast(dev_net(team->dev), skb, portid);
2246 }
2247 
2248 static int team_nl_fill_one_option_get(struct sk_buff *skb, struct team *team,
2249                                        struct team_option_inst *opt_inst)
2250 {
2251         struct nlattr *option_item;
2252         struct team_option *option = opt_inst->option;
2253         struct team_option_inst_info *opt_inst_info = &opt_inst->info;
2254         struct team_gsetter_ctx ctx;
2255         int err;
2256 
2257         ctx.info = opt_inst_info;
2258         err = team_option_get(team, opt_inst, &ctx);
2259         if (err)
2260                 return err;
2261 
2262         option_item = nla_nest_start(skb, TEAM_ATTR_ITEM_OPTION);
2263         if (!option_item)
2264                 return -EMSGSIZE;
2265 
2266         if (nla_put_string(skb, TEAM_ATTR_OPTION_NAME, option->name))
2267                 goto nest_cancel;
2268         if (opt_inst_info->port &&
2269             nla_put_u32(skb, TEAM_ATTR_OPTION_PORT_IFINDEX,
2270                         opt_inst_info->port->dev->ifindex))
2271                 goto nest_cancel;
2272         if (opt_inst->option->array_size &&
2273             nla_put_u32(skb, TEAM_ATTR_OPTION_ARRAY_INDEX,
2274                         opt_inst_info->array_index))
2275                 goto nest_cancel;
2276 
2277         switch (option->type) {
2278         case TEAM_OPTION_TYPE_U32:
2279                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_U32))
2280                         goto nest_cancel;
2281                 if (nla_put_u32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.u32_val))
2282                         goto nest_cancel;
2283                 break;
2284         case TEAM_OPTION_TYPE_STRING:
2285                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_STRING))
2286                         goto nest_cancel;
2287                 if (nla_put_string(skb, TEAM_ATTR_OPTION_DATA,
2288                                    ctx.data.str_val))
2289                         goto nest_cancel;
2290                 break;
2291         case TEAM_OPTION_TYPE_BINARY:
2292                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_BINARY))
2293                         goto nest_cancel;
2294                 if (nla_put(skb, TEAM_ATTR_OPTION_DATA, ctx.data.bin_val.len,
2295                             ctx.data.bin_val.ptr))
2296                         goto nest_cancel;
2297                 break;
2298         case TEAM_OPTION_TYPE_BOOL:
2299                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_FLAG))
2300                         goto nest_cancel;
2301                 if (ctx.data.bool_val &&
2302                     nla_put_flag(skb, TEAM_ATTR_OPTION_DATA))
2303                         goto nest_cancel;
2304                 break;
2305         case TEAM_OPTION_TYPE_S32:
2306                 if (nla_put_u8(skb, TEAM_ATTR_OPTION_TYPE, NLA_S32))
2307                         goto nest_cancel;
2308                 if (nla_put_s32(skb, TEAM_ATTR_OPTION_DATA, ctx.data.s32_val))
2309                         goto nest_cancel;
2310                 break;
2311         default:
2312                 BUG();
2313         }
2314         if (opt_inst->removed && nla_put_flag(skb, TEAM_ATTR_OPTION_REMOVED))
2315                 goto nest_cancel;
2316         if (opt_inst->changed) {
2317                 if (nla_put_flag(skb, TEAM_ATTR_OPTION_CHANGED))
2318                         goto nest_cancel;
2319                 opt_inst->changed = false;
2320         }
2321         nla_nest_end(skb, option_item);
2322         return 0;
2323 
2324 nest_cancel:
2325         nla_nest_cancel(skb, option_item);
2326         return -EMSGSIZE;
2327 }
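/*
 * For illustration, a per-port bool option such as "enabled" from the option
 * table above ends up encoded as one nest of the following shape:
 *
 *   TEAM_ATTR_ITEM_OPTION
 *     TEAM_ATTR_OPTION_NAME          "enabled"
 *     TEAM_ATTR_OPTION_PORT_IFINDEX  <port ifindex>
 *     TEAM_ATTR_OPTION_TYPE          NLA_FLAG
 *     TEAM_ATTR_OPTION_DATA          (flag, present only when true)
 *     TEAM_ATTR_OPTION_CHANGED       (flag, present only when changed)
 */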
2328 
2329 static int __send_and_alloc_skb(struct sk_buff **pskb,
2330                                 struct team *team, u32 portid,
2331                                 team_nl_send_func_t *send_func)
2332 {
2333         int err;
2334 
2335         if (*pskb) {
2336                 err = send_func(*pskb, team, portid);
2337                 if (err)
2338                         return err;
2339         }
2340         *pskb = genlmsg_new(GENLMSG_DEFAULT_SIZE, GFP_KERNEL);
2341         if (!*pskb)
2342                 return -ENOMEM;
2343         return 0;
2344 }
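/*
 * team_nl_send_options_get() and team_nl_send_port_list_get() below use this
 * helper to build multi-part dumps: once the skb being filled runs out of
 * room (-EMSGSIZE), the partially built skb is handed to send_func and a
 * fresh one is allocated, and the whole sequence is terminated by a separate
 * NLMSG_DONE message, mirroring an ordinary netlink dump.
 */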
2345 
2346 static int team_nl_send_options_get(struct team *team, u32 portid, u32 seq,
2347                                     int flags, team_nl_send_func_t *send_func,
2348                                     struct list_head *sel_opt_inst_list)
2349 {
2350         struct nlattr *option_list;
2351         struct nlmsghdr *nlh;
2352         void *hdr;
2353         struct team_option_inst *opt_inst;
2354         int err;
2355         struct sk_buff *skb = NULL;
2356         bool incomplete;
2357         int i;
2358 
2359         opt_inst = list_first_entry(sel_opt_inst_list,
2360                                     struct team_option_inst, tmp_list);
2361 
2362 start_again:
2363         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2364         if (err)
2365                 return err;
2366 
2367         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2368                           TEAM_CMD_OPTIONS_GET);
2369         if (!hdr)
2370                 return -EMSGSIZE;
2371 
2372         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2373                 goto nla_put_failure;
2374         option_list = nla_nest_start(skb, TEAM_ATTR_LIST_OPTION);
2375         if (!option_list)
2376                 goto nla_put_failure;
2377 
2378         i = 0;
2379         incomplete = false;
2380         list_for_each_entry_from(opt_inst, sel_opt_inst_list, tmp_list) {
2381                 err = team_nl_fill_one_option_get(skb, team, opt_inst);
2382                 if (err) {
2383                         if (err == -EMSGSIZE) {
2384                                 if (!i)
2385                                         goto errout;
2386                                 incomplete = true;
2387                                 break;
2388                         }
2389                         goto errout;
2390                 }
2391                 i++;
2392         }
2393 
2394         nla_nest_end(skb, option_list);
2395         genlmsg_end(skb, hdr);
2396         if (incomplete)
2397                 goto start_again;
2398 
2399 send_done:
2400         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2401         if (!nlh) {
2402                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2403                 if (err)
2404                         goto errout;
2405                 goto send_done;
2406         }
2407 
2408         return send_func(skb, team, portid);
2409 
2410 nla_put_failure:
2411         err = -EMSGSIZE;
2412 errout:
2413         genlmsg_cancel(skb, hdr);
2414         nlmsg_free(skb);
2415         return err;
2416 }
2417 
2418 static int team_nl_cmd_options_get(struct sk_buff *skb, struct genl_info *info)
2419 {
2420         struct team *team;
2421         struct team_option_inst *opt_inst;
2422         int err;
2423         LIST_HEAD(sel_opt_inst_list);
2424 
2425         team = team_nl_team_get(info);
2426         if (!team)
2427                 return -EINVAL;
2428 
2429         list_for_each_entry(opt_inst, &team->option_inst_list, list)
2430                 list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2431         err = team_nl_send_options_get(team, info->snd_portid, info->snd_seq,
2432                                        NLM_F_ACK, team_nl_send_unicast,
2433                                        &sel_opt_inst_list);
2434 
2435         team_nl_team_put(team);
2436 
2437         return err;
2438 }
2439 
2440 static int team_nl_send_event_options_get(struct team *team,
2441                                           struct list_head *sel_opt_inst_list);
2442 
2443 static int team_nl_cmd_options_set(struct sk_buff *skb, struct genl_info *info)
2444 {
2445         struct team *team;
2446         int err = 0;
2447         int i;
2448         struct nlattr *nl_option;
2449         LIST_HEAD(opt_inst_list);
2450 
2451         rtnl_lock();
2452 
2453         team = team_nl_team_get(info);
2454         if (!team) {
2455                 err = -EINVAL;
2456                 goto rtnl_unlock;
2457         }
2458 
2459         err = -EINVAL;
2460         if (!info->attrs[TEAM_ATTR_LIST_OPTION]) {
2461                 err = -EINVAL;
2462                 goto team_put;
2463         }
2464 
2465         nla_for_each_nested(nl_option, info->attrs[TEAM_ATTR_LIST_OPTION], i) {
2466                 struct nlattr *opt_attrs[TEAM_ATTR_OPTION_MAX + 1];
2467                 struct nlattr *attr;
2468                 struct nlattr *attr_data;
2469                 enum team_option_type opt_type;
2470                 int opt_port_ifindex = 0; /* != 0 for per-port options */
2471                 u32 opt_array_index = 0;
2472                 bool opt_is_array = false;
2473                 struct team_option_inst *opt_inst;
2474                 char *opt_name;
2475                 bool opt_found = false;
2476 
2477                 if (nla_type(nl_option) != TEAM_ATTR_ITEM_OPTION) {
2478                         err = -EINVAL;
2479                         goto team_put;
2480                 }
2481                 err = nla_parse_nested(opt_attrs, TEAM_ATTR_OPTION_MAX,
2482                                        nl_option, team_nl_option_policy);
2483                 if (err)
2484                         goto team_put;
2485                 if (!opt_attrs[TEAM_ATTR_OPTION_NAME] ||
2486                     !opt_attrs[TEAM_ATTR_OPTION_TYPE]) {
2487                         err = -EINVAL;
2488                         goto team_put;
2489                 }
2490                 switch (nla_get_u8(opt_attrs[TEAM_ATTR_OPTION_TYPE])) {
2491                 case NLA_U32:
2492                         opt_type = TEAM_OPTION_TYPE_U32;
2493                         break;
2494                 case NLA_STRING:
2495                         opt_type = TEAM_OPTION_TYPE_STRING;
2496                         break;
2497                 case NLA_BINARY:
2498                         opt_type = TEAM_OPTION_TYPE_BINARY;
2499                         break;
2500                 case NLA_FLAG:
2501                         opt_type = TEAM_OPTION_TYPE_BOOL;
2502                         break;
2503                 case NLA_S32:
2504                         opt_type = TEAM_OPTION_TYPE_S32;
2505                         break;
2506                 default:
2507                         goto team_put;
2508                 }
2509 
2510                 attr_data = opt_attrs[TEAM_ATTR_OPTION_DATA];
2511                 if (opt_type != TEAM_OPTION_TYPE_BOOL && !attr_data) {
2512                         err = -EINVAL;
2513                         goto team_put;
2514                 }
2515 
2516                 opt_name = nla_data(opt_attrs[TEAM_ATTR_OPTION_NAME]);
2517                 attr = opt_attrs[TEAM_ATTR_OPTION_PORT_IFINDEX];
2518                 if (attr)
2519                         opt_port_ifindex = nla_get_u32(attr);
2520 
2521                 attr = opt_attrs[TEAM_ATTR_OPTION_ARRAY_INDEX];
2522                 if (attr) {
2523                         opt_is_array = true;
2524                         opt_array_index = nla_get_u32(attr);
2525                 }
2526 
2527                 list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2528                         struct team_option *option = opt_inst->option;
2529                         struct team_gsetter_ctx ctx;
2530                         struct team_option_inst_info *opt_inst_info;
2531                         int tmp_ifindex;
2532 
2533                         opt_inst_info = &opt_inst->info;
2534                         tmp_ifindex = opt_inst_info->port ?
2535                                       opt_inst_info->port->dev->ifindex : 0;
2536                         if (option->type != opt_type ||
2537                             strcmp(option->name, opt_name) ||
2538                             tmp_ifindex != opt_port_ifindex ||
2539                             (option->array_size && !opt_is_array) ||
2540                             opt_inst_info->array_index != opt_array_index)
2541                                 continue;
2542                         opt_found = true;
2543                         ctx.info = opt_inst_info;
2544                         switch (opt_type) {
2545                         case TEAM_OPTION_TYPE_U32:
2546                                 ctx.data.u32_val = nla_get_u32(attr_data);
2547                                 break;
2548                         case TEAM_OPTION_TYPE_STRING:
2549                                 if (nla_len(attr_data) > TEAM_STRING_MAX_LEN) {
2550                                         err = -EINVAL;
2551                                         goto team_put;
2552                                 }
2553                                 ctx.data.str_val = nla_data(attr_data);
2554                                 break;
2555                         case TEAM_OPTION_TYPE_BINARY:
2556                                 ctx.data.bin_val.len = nla_len(attr_data);
2557                                 ctx.data.bin_val.ptr = nla_data(attr_data);
2558                                 break;
2559                         case TEAM_OPTION_TYPE_BOOL:
2560                                 ctx.data.bool_val = attr_data ? true : false;
2561                                 break;
2562                         case TEAM_OPTION_TYPE_S32:
2563                                 ctx.data.s32_val = nla_get_s32(attr_data);
2564                                 break;
2565                         default:
2566                                 BUG();
2567                         }
2568                         err = team_option_set(team, opt_inst, &ctx);
2569                         if (err)
2570                                 goto team_put;
2571                         opt_inst->changed = true;
2572                         list_add(&opt_inst->tmp_list, &opt_inst_list);
2573                 }
2574                 if (!opt_found) {
2575                         err = -ENOENT;
2576                         goto team_put;
2577                 }
2578         }
2579 
2580         err = team_nl_send_event_options_get(team, &opt_inst_list);
2581 
2582 team_put:
2583         team_nl_team_put(team);
2584 rtnl_unlock:
2585         rtnl_unlock();
2586         return err;
2587 }
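/*
 * A minimal userspace sketch of driving the handler above.  This is an
 * assumption for illustration only: it uses the libnl-3 API, the helper name
 * is hypothetical, error handling is trimmed, and CAP_NET_ADMIN is required.
 * It sets the per-team u32 option "mcast_rejoin_count" from the option table
 * above.
 */
#if 0
#include <stdint.h>
#include <net/if.h>
#include <linux/if_team.h>
#include <netlink/netlink.h>
#include <netlink/attr.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>

static int team_set_mcast_rejoin_count(const char *team_ifname, uint32_t count)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nl_msg *msg = nlmsg_alloc();
        struct nlattr *list, *item;
        int family, err;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, TEAM_GENL_NAME);

        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    TEAM_CMD_OPTIONS_SET, TEAM_GENL_VERSION);
        nla_put_u32(msg, TEAM_ATTR_TEAM_IFINDEX, if_nametoindex(team_ifname));

        /* one TEAM_ATTR_ITEM_OPTION nest inside TEAM_ATTR_LIST_OPTION */
        list = nla_nest_start(msg, TEAM_ATTR_LIST_OPTION);
        item = nla_nest_start(msg, TEAM_ATTR_ITEM_OPTION);
        nla_put_string(msg, TEAM_ATTR_OPTION_NAME, "mcast_rejoin_count");
        nla_put_u8(msg, TEAM_ATTR_OPTION_TYPE, NLA_U32);
        nla_put_u32(msg, TEAM_ATTR_OPTION_DATA, count);
        nla_nest_end(msg, item);
        nla_nest_end(msg, list);

        err = nl_send_auto(sk, msg);
        if (err >= 0)
                err = nl_wait_for_ack(sk);

        nlmsg_free(msg);
        nl_socket_free(sk);
        return err < 0 ? err : 0;
}
#endif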
2588 
2589 static int team_nl_fill_one_port_get(struct sk_buff *skb,
2590                                      struct team_port *port)
2591 {
2592         struct nlattr *port_item;
2593 
2594         port_item = nla_nest_start(skb, TEAM_ATTR_ITEM_PORT);
2595         if (!port_item)
2596                 goto nest_cancel;
2597         if (nla_put_u32(skb, TEAM_ATTR_PORT_IFINDEX, port->dev->ifindex))
2598                 goto nest_cancel;
2599         if (port->changed) {
2600                 if (nla_put_flag(skb, TEAM_ATTR_PORT_CHANGED))
2601                         goto nest_cancel;
2602                 port->changed = false;
2603         }
2604         if ((port->removed &&
2605              nla_put_flag(skb, TEAM_ATTR_PORT_REMOVED)) ||
2606             (port->state.linkup &&
2607              nla_put_flag(skb, TEAM_ATTR_PORT_LINKUP)) ||
2608             nla_put_u32(skb, TEAM_ATTR_PORT_SPEED, port->state.speed) ||
2609             nla_put_u8(skb, TEAM_ATTR_PORT_DUPLEX, port->state.duplex))
2610                 goto nest_cancel;
2611         nla_nest_end(skb, port_item);
2612         return 0;
2613 
2614 nest_cancel:
2615         nla_nest_cancel(skb, port_item);
2616         return -EMSGSIZE;
2617 }
2618 
2619 static int team_nl_send_port_list_get(struct team *team, u32 portid, u32 seq,
2620                                       int flags, team_nl_send_func_t *send_func,
2621                                       struct team_port *one_port)
2622 {
2623         struct nlattr *port_list;
2624         struct nlmsghdr *nlh;
2625         void *hdr;
2626         struct team_port *port;
2627         int err;
2628         struct sk_buff *skb = NULL;
2629         bool incomplete;
2630         int i;
2631 
2632         port = list_first_entry_or_null(&team->port_list,
2633                                         struct team_port, list);
2634 
2635 start_again:
2636         err = __send_and_alloc_skb(&skb, team, portid, send_func);
2637         if (err)
2638                 return err;
2639 
2640         hdr = genlmsg_put(skb, portid, seq, &team_nl_family, flags | NLM_F_MULTI,
2641                           TEAM_CMD_PORT_LIST_GET);
2642         if (!hdr)
2643                 return -EMSGSIZE;
2644 
2645         if (nla_put_u32(skb, TEAM_ATTR_TEAM_IFINDEX, team->dev->ifindex))
2646                 goto nla_put_failure;
2647         port_list = nla_nest_start(skb, TEAM_ATTR_LIST_PORT);
2648         if (!port_list)
2649                 goto nla_put_failure;
2650 
2651         i = 0;
2652         incomplete = false;
2653 
2654         /* If one port is selected, the caller wants to send a port list that
2655          * contains only this port. Otherwise go through all listed ports.
2656          */
2657         if (one_port) {
2658                 err = team_nl_fill_one_port_get(skb, one_port);
2659                 if (err)
2660                         goto errout;
2661         } else if (port) {
2662                 list_for_each_entry_from(port, &team->port_list, list) {
2663                         err = team_nl_fill_one_port_get(skb, port);
2664                         if (err) {
2665                                 if (err == -EMSGSIZE) {
2666                                         if (!i)
2667                                                 goto errout;
2668                                         incomplete = true;
2669                                         break;
2670                                 }
2671                                 goto errout;
2672                         }
2673                         i++;
2674                 }
2675         }
2676 
2677         nla_nest_end(skb, port_list);
2678         genlmsg_end(skb, hdr);
2679         if (incomplete)
2680                 goto start_again;
2681 
2682 send_done:
2683         nlh = nlmsg_put(skb, portid, seq, NLMSG_DONE, 0, flags | NLM_F_MULTI);
2684         if (!nlh) {
2685                 err = __send_and_alloc_skb(&skb, team, portid, send_func);
2686                 if (err)
2687                         goto errout;
2688                 goto send_done;
2689         }
2690 
2691         return send_func(skb, team, portid);
2692 
2693 nla_put_failure:
2694         err = -EMSGSIZE;
2695 errout:
2696         genlmsg_cancel(skb, hdr);
2697         nlmsg_free(skb);
2698         return err;
2699 }
2700 
2701 static int team_nl_cmd_port_list_get(struct sk_buff *skb,
2702                                      struct genl_info *info)
2703 {
2704         struct team *team;
2705         int err;
2706 
2707         team = team_nl_team_get(info);
2708         if (!team)
2709                 return -EINVAL;
2710 
2711         err = team_nl_send_port_list_get(team, info->snd_portid, info->snd_seq,
2712                                          NLM_F_ACK, team_nl_send_unicast, NULL);
2713 
2714         team_nl_team_put(team);
2715 
2716         return err;
2717 }
2718 
2719 static const struct genl_ops team_nl_ops[] = {
2720         {
2721                 .cmd = TEAM_CMD_NOOP,
2722                 .doit = team_nl_cmd_noop,
2723                 .policy = team_nl_policy,
2724         },
2725         {
2726                 .cmd = TEAM_CMD_OPTIONS_SET,
2727                 .doit = team_nl_cmd_options_set,
2728                 .policy = team_nl_policy,
2729                 .flags = GENL_ADMIN_PERM,
2730         },
2731         {
2732                 .cmd = TEAM_CMD_OPTIONS_GET,
2733                 .doit = team_nl_cmd_options_get,
2734                 .policy = team_nl_policy,
2735                 .flags = GENL_ADMIN_PERM,
2736         },
2737         {
2738                 .cmd = TEAM_CMD_PORT_LIST_GET,
2739                 .doit = team_nl_cmd_port_list_get,
2740                 .policy = team_nl_policy,
2741                 .flags = GENL_ADMIN_PERM,
2742         },
2743 };
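/*
 * GENL_ADMIN_PERM restricts the option and port-list commands to callers
 * with CAP_NET_ADMIN; only TEAM_CMD_NOOP can be issued unprivileged.
 */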
2744 
2745 static const struct genl_multicast_group team_nl_mcgrps[] = {
2746         { .name = TEAM_GENL_CHANGE_EVENT_MC_GRP_NAME, },
2747 };
2748 
2749 static int team_nl_send_multicast(struct sk_buff *skb,
2750                                   struct team *team, u32 portid)
2751 {
2752         return genlmsg_multicast_netns(&team_nl_family, dev_net(team->dev),
2753                                        skb, 0, 0, GFP_KERNEL);
2754 }
2755 
2756 static int team_nl_send_event_options_get(struct team *team,
2757                                           struct list_head *sel_opt_inst_list)
2758 {
2759         return team_nl_send_options_get(team, 0, 0, 0, team_nl_send_multicast,
2760                                         sel_opt_inst_list);
2761 }
2762 
2763 static int team_nl_send_event_port_get(struct team *team,
2764                                        struct team_port *port)
2765 {
2766         return team_nl_send_port_list_get(team, 0, 0, 0, team_nl_send_multicast,
2767                                           port);
2768 }
2769 
2770 static int team_nl_init(void)
2771 {
2772         return genl_register_family_with_ops_groups(&team_nl_family, team_nl_ops,
2773                                                     team_nl_mcgrps);
2774 }
2775 
2776 static void team_nl_fini(void)
2777 {
2778         genl_unregister_family(&team_nl_family);
2779 }
2780 
2781 
2782 /******************
2783  * Change checkers
2784  ******************/
2785 
2786 static void __team_options_change_check(struct team *team)
2787 {
2788         int err;
2789         struct team_option_inst *opt_inst;
2790         LIST_HEAD(sel_opt_inst_list);
2791 
2792         list_for_each_entry(opt_inst, &team->option_inst_list, list) {
2793                 if (opt_inst->changed)
2794                         list_add_tail(&opt_inst->tmp_list, &sel_opt_inst_list);
2795         }
2796         err = team_nl_send_event_options_get(team, &sel_opt_inst_list);
2797         if (err && err != -ESRCH)
2798                 netdev_warn(team->dev, "Failed to send options change via netlink (err %d)\n",
2799                             err);
2800 }
2801 
2802 /* rtnl lock is held */
2803 
2804 static void __team_port_change_send(struct team_port *port, bool linkup)
2805 {
2806         int err;
2807 
2808         port->changed = true;
2809         port->state.linkup = linkup;
2810         team_refresh_port_linkup(port);
2811         if (linkup) {
2812                 struct ethtool_link_ksettings ecmd;
2813 
2814                 err = __ethtool_get_link_ksettings(port->dev, &ecmd);
2815                 if (!err) {
2816                         port->state.speed = ecmd.base.speed;
2817                         port->state.duplex = ecmd.base.duplex;
2818                         goto send_event;
2819                 }
2820         }
2821         port->state.speed = 0;
2822         port->state.duplex = 0;
2823 
2824 send_event:
2825         err = team_nl_send_event_port_get(port->team, port);
2826         if (err && err != -ESRCH)
2827                 netdev_warn(port->team->dev, "Failed to send port change of device %s via netlink (err %d)\n",
2828                             port->dev->name, err);
2829 
2830 }
2831 
2832 static void __team_carrier_check(struct team *team)
2833 {
2834         struct team_port *port;
2835         bool team_linkup;
2836 
2837         if (team->user_carrier_enabled)
2838                 return;
2839 
2840         team_linkup = false;
2841         list_for_each_entry(port, &team->port_list, list) {
2842                 if (port->linkup) {
2843                         team_linkup = true;
2844                         break;
2845                 }
2846         }
2847 
2848         if (team_linkup)
2849                 netif_carrier_on(team->dev);
2850         else
2851                 netif_carrier_off(team->dev);
2852 }
2853 
2854 static void __team_port_change_check(struct team_port *port, bool linkup)
2855 {
2856         if (port->state.linkup != linkup)
2857                 __team_port_change_send(port, linkup);
2858         __team_carrier_check(port->team);
2859 }
2860 
2861 static void __team_port_change_port_added(struct team_port *port, bool linkup)
2862 {
2863         __team_port_change_send(port, linkup);
2864         __team_carrier_check(port->team);
2865 }
2866 
2867 static void __team_port_change_port_removed(struct team_port *port)
2868 {
2869         port->removed = true;
2870         __team_port_change_send(port, false);
2871         __team_carrier_check(port->team);
2872 }
2873 
2874 static void team_port_change_check(struct team_port *port, bool linkup)
2875 {
2876         struct team *team = port->team;
2877 
2878         mutex_lock(&team->lock);
2879         __team_port_change_check(port, linkup);
2880         mutex_unlock(&team->lock);
2881 }
2882 
2883 
2884 /************************************
2885  * Net device notifier event handler
2886  ************************************/
2887 
2888 static int team_device_event(struct notifier_block *unused,
2889                              unsigned long event, void *ptr)
2890 {
2891         struct net_device *dev = netdev_notifier_info_to_dev(ptr);
2892         struct team_port *port;
2893 
2894         port = team_port_get_rtnl(dev);
2895         if (!port)
2896                 return NOTIFY_DONE;
2897 
2898         switch (event) {
2899         case NETDEV_UP:
2900                 if (netif_carrier_ok(dev))
2901                         team_port_change_check(port, true);
2902                 break;
2903         case NETDEV_DOWN:
2904                 team_port_change_check(port, false);
2905                 break;
2906         case NETDEV_CHANGE:
2907                 if (netif_running(port->dev))
2908                         team_port_change_check(port,
2909                                                !!netif_carrier_ok(port->dev));
2910                 break;
2911         case NETDEV_UNREGISTER:
2912                 team_del_slave(port->team->dev, dev);
2913                 break;
2914         case NETDEV_FEAT_CHANGE:
2915                 team_compute_features(port->team);
2916                 break;
2917         case NETDEV_PRECHANGEMTU:
2918                 /* Forbid changing the MTU of the underlying device */
2919                 if (!port->team->port_mtu_change_allowed)
2920                         return NOTIFY_BAD;
2921                 break;
2922         case NETDEV_PRE_TYPE_CHANGE:
2923                 /* Forbid changing the type of the underlying device */
2924                 return NOTIFY_BAD;
2925         case NETDEV_RESEND_IGMP:
2926                 /* Propagate to master device */
2927                 call_netdevice_notifiers(event, port->team->dev);
2928                 break;
2929         }
2930         return NOTIFY_DONE;
2931 }
2932 
2933 static struct notifier_block team_notifier_block __read_mostly = {
2934         .notifier_call = team_device_event,
2935 };
2936 
2937 
2938 /***********************
2939  * Module init and exit
2940  ***********************/
2941 
2942 static int __init team_module_init(void)
2943 {
2944         int err;
2945 
2946         register_netdevice_notifier(&team_notifier_block);
2947 
2948         err = rtnl_link_register(&team_link_ops);
2949         if (err)
2950                 goto err_rtnl_reg;
2951 
2952         err = team_nl_init();
2953         if (err)
2954                 goto err_nl_init;
2955 
2956         return 0;
2957 
2958 err_nl_init:
2959         rtnl_link_unregister(&team_link_ops);
2960 
2961 err_rtnl_reg:
2962         unregister_netdevice_notifier(&team_notifier_block);
2963 
2964         return err;
2965 }
2966 
2967 static void __exit team_module_exit(void)
2968 {
2969         team_nl_fini();
2970         rtnl_link_unregister(&team_link_ops);
2971         unregister_netdevice_notifier(&team_notifier_block);
2972 }
2973 
2974 module_init(team_module_init);
2975 module_exit(team_module_exit);
2976 
2977 MODULE_LICENSE("GPL v2");
2978 MODULE_AUTHOR("Jiri Pirko <jpirko@redhat.com>");
2979 MODULE_DESCRIPTION("Ethernet team device driver");
2980 MODULE_ALIAS_RTNL_LINK(DRV_NAME);
2981 
