]> bbs.cooldavid.org Git - net-next-2.6.git/blob - net/bridge/br_forward.c
include cleanup: Update gfp.h and slab.h includes to prepare for breaking implicit...
[net-next-2.6.git] / net / bridge / br_forward.c
1 /*
2  *      Forwarding decision
3  *      Linux ethernet bridge
4  *
5  *      Authors:
6  *      Lennert Buytenhek               <buytenh@gnu.org>
7  *
8  *      This program is free software; you can redistribute it and/or
9  *      modify it under the terms of the GNU General Public License
10  *      as published by the Free Software Foundation; either version
11  *      2 of the License, or (at your option) any later version.
12  */
13
14 #include <linux/err.h>
15 #include <linux/slab.h>
16 #include <linux/kernel.h>
17 #include <linux/netdevice.h>
18 #include <linux/skbuff.h>
19 #include <linux/if_vlan.h>
20 #include <linux/netfilter_bridge.h>
21 #include "br_private.h"
22
23 static int deliver_clone(const struct net_bridge_port *prev,
24                          struct sk_buff *skb,
25                          void (*__packet_hook)(const struct net_bridge_port *p,
26                                                struct sk_buff *skb));
27
/* Don't forward packets to originating port or forwarding disabled */
static inline int should_deliver(const struct net_bridge_port *p,
				 const struct sk_buff *skb)
{
	/* Hairpin mode permits reflecting a frame back out the port it
	 * arrived on; otherwise the ingress port (skb->dev == p->dev) is
	 * skipped.  The port must also be in the STP forwarding state. */
	return (((p->flags & BR_HAIRPIN_MODE) || skb->dev != p->dev) &&
		p->state == BR_STATE_FORWARDING);
}
35
36 static inline unsigned packet_length(const struct sk_buff *skb)
37 {
38         return skb->len - (skb->protocol == htons(ETH_P_8021Q) ? VLAN_HLEN : 0);
39 }
40
41 int br_dev_queue_push_xmit(struct sk_buff *skb)
42 {
43         /* drop mtu oversized packets except gso */
44         if (packet_length(skb) > skb->dev->mtu && !skb_is_gso(skb))
45                 kfree_skb(skb);
46         else {
47                 /* ip_refrag calls ip_fragment, doesn't copy the MAC header. */
48                 if (nf_bridge_maybe_copy_header(skb))
49                         kfree_skb(skb);
50                 else {
51                         skb_push(skb, ETH_HLEN);
52
53                         dev_queue_xmit(skb);
54                 }
55         }
56
57         return 0;
58 }
59
60 int br_forward_finish(struct sk_buff *skb)
61 {
62         return NF_HOOK(PF_BRIDGE, NF_BR_POST_ROUTING, skb, NULL, skb->dev,
63                        br_dev_queue_push_xmit);
64
65 }
66
67 static void __br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
68 {
69         skb->dev = to->dev;
70         NF_HOOK(PF_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev,
71                         br_forward_finish);
72 }
73
74 static void __br_forward(const struct net_bridge_port *to, struct sk_buff *skb)
75 {
76         struct net_device *indev;
77
78         if (skb_warn_if_lro(skb)) {
79                 kfree_skb(skb);
80                 return;
81         }
82
83         indev = skb->dev;
84         skb->dev = to->dev;
85         skb_forward_csum(skb);
86
87         NF_HOOK(PF_BRIDGE, NF_BR_FORWARD, skb, indev, skb->dev,
88                         br_forward_finish);
89 }
90
/* called with rcu_read_lock */
/* Deliver a locally-originated frame to one port, or free it when the
 * port should not receive it.  Always consumes skb. */
void br_deliver(const struct net_bridge_port *to, struct sk_buff *skb)
{
	if (!should_deliver(to, skb)) {
		kfree_skb(skb);
		return;
	}

	__br_deliver(to, skb);
}
101
102 /* called with rcu_read_lock */
103 void br_forward(const struct net_bridge_port *to, struct sk_buff *skb, struct sk_buff *skb0)
104 {
105         if (should_deliver(to, skb)) {
106                 if (skb0)
107                         deliver_clone(to, skb, __br_forward);
108                 else
109                         __br_forward(to, skb);
110                 return;
111         }
112
113         if (!skb0)
114                 kfree_skb(skb);
115 }
116
117 static int deliver_clone(const struct net_bridge_port *prev,
118                          struct sk_buff *skb,
119                          void (*__packet_hook)(const struct net_bridge_port *p,
120                                                struct sk_buff *skb))
121 {
122         skb = skb_clone(skb, GFP_ATOMIC);
123         if (!skb) {
124                 struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
125
126                 dev->stats.tx_dropped++;
127                 return -ENOMEM;
128         }
129
130         __packet_hook(prev, skb);
131         return 0;
132 }
133
/* Lazy-delivery helper for the flood loops: rather than sending to p
 * immediately, send a clone to the previously remembered port (if any)
 * and return p as the new "pending" port.  Keeping one port in reserve
 * lets the caller's final delivery consume the original skb without an
 * extra clone.
 *
 * Returns the pending port on success (p, or prev when p should not
 * receive the frame), or ERR_PTR on clone failure.
 */
static struct net_bridge_port *maybe_deliver(
	struct net_bridge_port *prev, struct net_bridge_port *p,
	struct sk_buff *skb,
	void (*__packet_hook)(const struct net_bridge_port *p,
			      struct sk_buff *skb))
{
	if (!should_deliver(p, skb))
		return prev;

	if (prev) {
		int err = deliver_clone(prev, skb, __packet_hook);
		if (err)
			return ERR_PTR(err);
	}

	return p;
}
155
/* called under bridge lock */
/* NOTE(review): the port list is walked with list_for_each_entry_rcu(),
 * and both public callers are annotated "called with rcu_read_lock" —
 * confirm whether this comment should say rcu_read_lock instead.
 *
 * Flood skb out of every eligible port.  @skb0, when non-NULL, means the
 * caller retains a local copy, so skb itself must survive: the final
 * delivery is then done on a clone and skb is never freed here.
 */
static void br_flood(struct net_bridge *br, struct sk_buff *skb,
		     struct sk_buff *skb0,
		     void (*__packet_hook)(const struct net_bridge_port *p,
					   struct sk_buff *skb))
{
	struct net_bridge_port *p;
	struct net_bridge_port *prev;

	prev = NULL;

	list_for_each_entry_rcu(p, &br->port_list, list) {
		/* maybe_deliver() clones to the previously remembered
		 * port and keeps p pending, so one delivery is always
		 * held back to consume skb without cloning. */
		prev = maybe_deliver(prev, p, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;
	}

	/* No eligible port at all: drop (unless skb0 holds a reference). */
	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
186
187
/* called with rcu_read_lock */
/* Flood a locally-originated frame out of every port.  Consumes skb. */
void br_flood_deliver(struct net_bridge *br, struct sk_buff *skb)
{
	br_flood(br, skb, NULL, __br_deliver);
}
193
/* called under bridge lock */
/* NOTE(review): the deliver twin above says "called with rcu_read_lock"
 * and br_flood uses an RCU list walk — confirm which annotation is right.
 * @skb2: caller's retained local copy; when non-NULL, skb is not freed. */
void br_flood_forward(struct net_bridge *br, struct sk_buff *skb,
		      struct sk_buff *skb2)
{
	br_flood(br, skb, skb2, __br_forward);
}
200
201 #ifdef CONFIG_BRIDGE_IGMP_SNOOPING
/* called with rcu_read_lock */
/* Deliver skb to every port subscribed to the multicast group (mdst's
 * port list) plus every port with a multicast router behind it
 * (br->router_list), without delivering twice to a port that appears on
 * both lists.
 *
 * The loop is a two-way merge keyed on the port pointer value: each
 * iteration takes the numerically larger of lport/rport and advances
 * whichever list(s) supplied that port.  NOTE(review): this assumes both
 * lists are maintained sorted by decreasing port pointer — confirm the
 * insertion code elsewhere preserves that invariant.
 */
static void br_multicast_flood(struct net_bridge_mdb_entry *mdst,
			       struct sk_buff *skb, struct sk_buff *skb0,
			       void (*__packet_hook)(
					const struct net_bridge_port *p,
					struct sk_buff *skb))
{
	struct net_device *dev = BR_INPUT_SKB_CB(skb)->brdev;
	struct net_bridge *br = netdev_priv(dev);
	struct net_bridge_port *port;
	struct net_bridge_port *lport, *rport;
	struct net_bridge_port *prev;
	struct net_bridge_port_group *p;
	struct hlist_node *rp;

	prev = NULL;

	rp = br->router_list.first;
	p = mdst ? mdst->ports : NULL;	/* mdst may be NULL: routers only */
	while (p || rp) {
		lport = p ? p->port : NULL;
		rport = rp ? hlist_entry(rp, struct net_bridge_port, rlist) :
			     NULL;

		/* Pick the larger pointer; NULL (exhausted list) always
		 * loses, so the surviving list keeps draining. */
		port = (unsigned long)lport > (unsigned long)rport ?
		       lport : rport;

		prev = maybe_deliver(prev, port, skb, __packet_hook);
		if (IS_ERR(prev))
			goto out;

		/* Advance every list whose head matched the chosen port;
		 * advancing both dedupes a port present on both lists. */
		if ((unsigned long)lport >= (unsigned long)port)
			p = p->next;
		if ((unsigned long)rport >= (unsigned long)port)
			rp = rp->next;
	}

	/* Nothing eligible: drop unless skb0 keeps a local reference. */
	if (!prev)
		goto out;

	if (skb0)
		deliver_clone(prev, skb, __packet_hook);
	else
		__packet_hook(prev, skb);
	return;

out:
	if (!skb0)
		kfree_skb(skb);
}
252
/* called with rcu_read_lock */
/* Locally-originated multicast: deliver to group members and router
 * ports.  Consumes skb. */
void br_multicast_deliver(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb)
{
	br_multicast_flood(mdst, skb, NULL, __br_deliver);
}
259
/* called with rcu_read_lock */
/* Forwarded multicast: deliver to group members and router ports.
 * @skb2: caller's retained local copy; when non-NULL, skb is not freed. */
void br_multicast_forward(struct net_bridge_mdb_entry *mdst,
			  struct sk_buff *skb, struct sk_buff *skb2)
{
	br_multicast_flood(mdst, skb, skb2, __br_forward);
}
266 #endif