Version:  2.0.40 2.2.26 2.4.37 3.6 3.7 3.8 3.9 3.10 3.11 3.12 3.13 3.14 3.15 3.16 3.17 3.18 3.19 4.0 4.1 4.2

Linux/net/bridge/br_forward.c

  1 /*
  2  *      Forwarding decision
  3  *      Linux ethernet bridge
  4  *
  5  *      Authors:
  6  *      Lennert Buytenhek               <buytenh@gnu.org>
  7  *
  8  *      This program is free software; you can redistribute it and/or
  9  *      modify it under the terms of the GNU General Public License
 10  *      as published by the Free Software Foundation; either version
 11  *      2 of the License, or (at your option) any later version.
 12  */
 13 
 14 #include <linux/err.h>
 15 #include <linux/slab.h>
 16 #include <linux/kernel.h>
 17 #include <linux/netdevice.h>
 18 #include <linux/netpoll.h>
 19 #include <linux/skbuff.h>
 20 #include <linux/if_vlan.h>
 21 #include <linux/netfilter_bridge.h>
 22 #include "br_private.h"
 23 
 24 static int deliver_clone(const struct net_bridge_port *prev,
 25                          struct sk_buff *skb,
 26                          void (*__packet_hook)(const struct net_bridge_port *p,
 27                                                struct sk_buff *skb));
 28 
 29 /* Don't forward packets to originating port or forwarding disabled */
 30 static inline int should_deliver(const struct net_bridge_port *p,
 31                                  const struct sk_buff *skb)
 32 {
 33         return ((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
 34                 br_allowed_egress(p->br, nbp_get_vlan_info(p), skb) &&
 35                 p->state == BR_STATE_FORWARDING;
 36 }
 37 
int br_dev_queue_push_xmit(struct sock *sk, struct sk_buff *skb)
{
	/* Drop frames that don't fit the egress device (MTU/feature
	 * check done by the core helper).
	 */
	if (!is_skb_forwardable(skb->dev, skb))
		goto drop;

	/* Re-expose the Ethernet header that bridge input pulled. */
	skb_push(skb, ETH_HLEN);
	br_drop_fake_rtable(skb);
	skb_sender_cpu_clear(skb);

	/* For partial-checksum frames whose outer protocol is still a
	 * VLAN ethertype, locate the encapsulated protocol and re-point
	 * the network header past the tag(s) — presumably so the
	 * checksum offset arithmetic in lower layers stays correct.
	 * If the inner protocol can't be determined, drop the frame.
	 */
	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    (skb->protocol == htons(ETH_P_8021Q) ||
	     skb->protocol == htons(ETH_P_8021AD))) {
		int depth;

		if (!__vlan_get_protocol(skb, skb->protocol, &depth))
			goto drop;

		skb_set_network_header(skb, depth);
	}

	dev_queue_xmit(skb);

	return 0;

drop:
	kfree_skb(skb);
	return 0;
}
EXPORT_SYMBOL_GPL(br_dev_queue_push_xmit);
 67 
/* Final okfn for the bridge output paths: run the NF_BR_POST_ROUTING
 * netfilter hook; if the hook accepts, br_dev_queue_push_xmit() does
 * the actual transmit.
 */
int br_forward_finish(struct sock *sk, struct sk_buff *skb)
{
	return NF_HOOK(NFPROTO_BRIDGE, NF_BR_POST_ROUTING, sk, skb,
		       NULL, skb->dev,
		       br_dev_queue_push_xmit);

}
EXPORT_SYMBOL_GPL(br_forward_finish);
 76 
/* Deliver a locally-originated frame out through port @to.
 * Consumes @skb on every path.
 */
static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	/* Apply egress VLAN policy; may retag/untag, or free the skb
	 * and return NULL.
	 */
	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	skb->dev = to->dev;

	/* netpoll transmits directly, bypassing the netfilter hooks, so
	 * do the forwardable check and Ethernet header push here.
	 */
	if (unlikely(netpoll_tx_running(to->br->dev))) {
		if (!is_skb_forwardable(skb->dev, skb))
			kfree_skb(skb);
		else {
			skb_push(skb, ETH_HLEN);
			br_netpoll_send_skb(to, skb);
		}
		return;
	}

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, NULL, skb,
		NULL, skb->dev,
		br_forward_finish);
}
 99 
/* Forward a frame received on another port out through port @to.
 * Consumes @skb on every path.
 */
static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
{
	struct net_device *indev;

	/* LRO-aggregated skbs must not be forwarded; warn and drop. */
	if (skb_warn_if_lro(skb)) {
		kfree_skb(skb);
		return;
	}

	/* Apply egress VLAN policy; may consume the skb. */
	skb = br_handle_vlan(to->br, nbp_get_vlan_info(to), skb);
	if (!skb)
		return;

	/* Remember the ingress device for the netfilter hook before
	 * retargeting the skb at the egress port.
	 */
	indev = skb->dev;
	skb->dev = to->dev;
	skb_forward_csum(skb);

	NF_HOOK(NFPROTO_BRIDGE, NF_BR_FORWARD, NULL, skb,
		indev, skb->dev,
		br_forward_finish);
}
121 
/* called with rcu_read_lock */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	/* No destination port, or delivery disallowed (ingress==egress
	 * without hairpin, VLAN policy, STP state): drop the frame.
	 */
	if (!to || !should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_deliver(to, skb);
}
EXPORT_SYMBOL_GPL(br_deliver);
133 
134 /* called with rcu_read_lock */
135 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
136 {
137         if (should_deliver(to, skb)) {
138                 if (skb0)
139                         deliver_clone(to, skb, __br_forward);
140                 else
141                         __br_forward(to, skb);
142                 return;
143         }
144 
145         if (!skb0)
146                 kfree_skb(skb);
147 }
148 
149 static int deliver_clone(const struct net_bridge_port *prev,
150                          struct sk_buff *skb,
151                          void (*__packet_hook)(const struct net_bridge_port *p,
152                                                struct sk_buff *skb))
153 {
154         struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
155 
156         skb = skb_clone(skb, GFP_ATOMIC);
157         if (!skb) {
158                 dev->stats.tx_dropped++;
159                 return -ENOMEM;
160         }
161 
162         __packet_hook(prev, skb);
163         return 0;
164 }
165 
/* One step of the flood loops: if @p should receive the frame, deliver
 * a clone to the previously pending port @prev (so the very last
 * eligible port can take the original skb) and make @p the new pending
 * port.  Returns the pending port, or ERR_PTR on clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	if (!should_deliver(p, skb))
		return prev;

	if (prev) {
		int err = deliver_clone(prev, skb, __packet_hook);

		if (err)
			return ERR_PTR(err);
	}

	return p;
}
187 
/* called under bridge lock */
/* Flood @skb out every eligible port.  Each port but the last gets a
 * clone; the last eligible port receives the original skb unless the
 * caller still owns it (@skb0 != NULL), in which case the last port
 * also gets a clone.  NOTE(review): the comment above says "bridge
 * lock" but the walk uses list_for_each_entry_rcu — confirm which
 * protection callers actually hold.
 */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb),
		     bool unicast)
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* Do not flood unicast traffic to ports that turn it off */
		if (unicast && !(p->flags & BR_FLOOD))
			continue;

		/* Do not flood to ports that enable proxy ARP */
		if (p->flags & BR_PROXYARP)
			continue;
		/* Skip WiFi proxy-ARP ports once a reply was generated. */
		if ((p->flags & BR_PROXYARP_WIFI) &&
		    BR_INPUT_SKB_CB(skb)->proxyarp_replied)
			continue;

		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	/* No eligible port at all: just clean up. */
	if (!prev)
		goto out;

	/* Send to the final pending port: a clone if the caller keeps
	 * the original (skb0 != NULL), otherwise the original itself.
	 */
	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
230 
231 
/* called with rcu_read_lock */
/* Flood a locally-originated frame (deliver hook => NF_BR_LOCAL_OUT);
 * ownership of @skb passes to br_flood.
 */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb, bool unicast)
{
	br_flood(br, skb, NULL, __br_deliver, unicast);
}
237 
/* called under bridge lock */
/* Flood a received frame (forward hook => NF_BR_FORWARD).  @skb2, when
 * non-NULL, signals that the caller retains @skb (e.g. for local
 * delivery), so br_flood only ever sends clones of it.
 */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2, bool unicast)
{
	br_flood(br, skb, skb2, __br_forward, unicast);
}
244 
245 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
/* Deliver a multicast frame to the union of the mdb entry's port group
 * list and the bridge's multicast-router port list.  Both lists are
 * walked simultaneously; because both appear to be kept sorted by port
 * pointer value (descending — TODO confirm against the insert paths),
 * always advancing the larger pointer visits each port exactly once
 * even when it is on both lists.
 */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *prev = NULL;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	rp = rcu_dereference(hlist_first_rcu(&br->router_list));
	p = mdst ? rcu_dereference(mdst->ports) : NULL;
	while (p || rp) {
		struct net_bridge_port *port, *lport, *rport;

		/* Current candidate from each list (NULL when the list
		 * is exhausted).
		 */
		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		/* Pick the candidate with the larger pointer value. */
		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		/* Advance whichever list(s) supplied the chosen port;
		 * both advance when the same port is on both lists.
		 */
		if ((unsigned long)lport >= (unsigned long)port)
			p = rcu_dereference(p->next);
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rcu_dereference(hlist_next_rcu(rp));
	}

	if (!prev)
		goto out;

	/* Last pending port: clone if the caller keeps the original. */
	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
294 
/* called with rcu_read_lock */
/* Multicast-flood a locally-originated frame (deliver hook). */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
301 
/* called with rcu_read_lock */
/* Multicast-flood a received frame (forward hook); @skb2 non-NULL
 * means the caller retains @skb, so only clones are sent.
 */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
308 #endif
309 

This page was automatically generated by LXR 0.3.1 (source).  •  Linux is a registered trademark of Linus Torvalds  •  Contact us