/*
 * net/8021q/vlan_core.c — core receive-path helpers for 802.1Q VLAN
 * hardware acceleration and GRO.
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/if_vlan.h>
#include <linux/netpoll.h>
#include "vlan.h"

7 /* VLAN rx hw acceleration helper.  This acts like netif_{rx,receive_skb}(). */
8 int __vlan_hwaccel_rx(struct sk_buff *skb, struct vlan_group *grp,
9                       u16 vlan_tci, int polling)
10 {
11         struct net_device *vlan_dev;
12         u16 vlan_id;
13
14         if (netpoll_rx(skb))
15                 return NET_RX_DROP;
16
17         if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
18                 skb->deliver_no_wcard = 1;
19
20         skb->skb_iif = skb->dev->ifindex;
21         __vlan_hwaccel_put_tag(skb, vlan_tci);
22         vlan_id = vlan_tci & VLAN_VID_MASK;
23         vlan_dev = vlan_group_get_device(grp, vlan_id);
24
25         if (vlan_dev)
26                 skb->dev = vlan_dev;
27         else if (vlan_id) {
28                 if (!(skb->dev->flags & IFF_PROMISC))
29                         goto drop;
30                 skb->pkt_type = PACKET_OTHERHOST;
31         }
32
33         return polling ? netif_receive_skb(skb) : netif_rx(skb);
34
35 drop:
36         dev_kfree_skb_any(skb);
37         return NET_RX_DROP;
38 }
39 EXPORT_SYMBOL(__vlan_hwaccel_rx);
40
/*
 * Finish receive processing for a hw-accelerated VLAN skb whose VLAN
 * device has already been resolved: deliver a copy to taps on the real
 * device, map the ingress 802.1p priority, clear the tag and account
 * per-CPU rx statistics.
 */
void vlan_hwaccel_do_receive(struct sk_buff *skb)
{
	struct net_device *dev = skb->dev;
	struct vlan_rx_stats     *rx_stats;

	/* Taps (packet sockets) listen on the underlying device, so point
	 * skb->dev at it just for the NIT delivery, then restore it. */
	skb->dev = vlan_dev_real_dev(dev);
	netif_nit_deliver(skb);

	skb->dev = dev;
	skb->priority = vlan_get_ingress_priority(dev, skb->vlan_tci);
	skb->vlan_tci = 0;

	rx_stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats);

	/* Writer side of the 64-bit stats seqcount; readers retry while
	 * this section is open. */
	u64_stats_update_begin(&rx_stats->syncp);
	rx_stats->rx_packets++;
	rx_stats->rx_bytes += skb->len;

	switch (skb->pkt_type) {
	case PACKET_BROADCAST:
		break;
	case PACKET_MULTICAST:
		rx_stats->rx_multicast++;
		break;
	case PACKET_OTHERHOST:
		/* Our lower layer thinks this is not local, let's make sure.
		 * This allows the VLAN to have a different MAC than the
		 * underlying device, and still route correctly. */
		if (!compare_ether_addr(eth_hdr(skb)->h_dest,
					dev->dev_addr))
			skb->pkt_type = PACKET_HOST;
		break;
	}
	u64_stats_update_end(&rx_stats->syncp);
}

77 struct net_device *vlan_dev_real_dev(const struct net_device *dev)
78 {
79         return vlan_dev_info(dev)->real_dev;
80 }
81 EXPORT_SYMBOL(vlan_dev_real_dev);
82
83 u16 vlan_dev_vlan_id(const struct net_device *dev)
84 {
85         return vlan_dev_info(dev)->vlan_id;
86 }
87 EXPORT_SYMBOL(vlan_dev_vlan_id);
88
89 static gro_result_t
90 vlan_gro_common(struct napi_struct *napi, struct vlan_group *grp,
91                 unsigned int vlan_tci, struct sk_buff *skb)
92 {
93         struct sk_buff *p;
94         struct net_device *vlan_dev;
95         u16 vlan_id;
96
97         if (skb_bond_should_drop(skb, ACCESS_ONCE(skb->dev->master)))
98                 skb->deliver_no_wcard = 1;
99
100         skb->skb_iif = skb->dev->ifindex;
101         __vlan_hwaccel_put_tag(skb, vlan_tci);
102         vlan_id = vlan_tci & VLAN_VID_MASK;
103         vlan_dev = vlan_group_get_device(grp, vlan_id);
104
105         if (vlan_dev)
106                 skb->dev = vlan_dev;
107         else if (vlan_id) {
108                 if (!(skb->dev->flags & IFF_PROMISC))
109                         goto drop;
110                 skb->pkt_type = PACKET_OTHERHOST;
111         }
112
113         for (p = napi->gro_list; p; p = p->next) {
114                 unsigned long diffs;
115
116                 diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
117                 diffs |= compare_ether_header(skb_mac_header(p),
118                                               skb_gro_mac_header(skb));
119                 NAPI_GRO_CB(p)->same_flow = !diffs;
120                 NAPI_GRO_CB(p)->flush = 0;
121         }
122
123         return dev_gro_receive(napi, skb);
124
125 drop:
126         return GRO_DROP;
127 }
128
129 gro_result_t vlan_gro_receive(struct napi_struct *napi, struct vlan_group *grp,
130                               unsigned int vlan_tci, struct sk_buff *skb)
131 {
132         if (netpoll_rx_on(skb))
133                 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
134                         ? GRO_DROP : GRO_NORMAL;
135
136         skb_gro_reset_offset(skb);
137
138         return napi_skb_finish(vlan_gro_common(napi, grp, vlan_tci, skb), skb);
139 }
140 EXPORT_SYMBOL(vlan_gro_receive);
141
142 gro_result_t vlan_gro_frags(struct napi_struct *napi, struct vlan_group *grp,
143                             unsigned int vlan_tci)
144 {
145         struct sk_buff *skb = napi_frags_skb(napi);
146
147         if (!skb)
148                 return GRO_DROP;
149
150         if (netpoll_rx_on(skb)) {
151                 skb->protocol = eth_type_trans(skb, skb->dev);
152                 return vlan_hwaccel_receive_skb(skb, grp, vlan_tci)
153                         ? GRO_DROP : GRO_NORMAL;
154         }
155
156         return napi_frags_finish(napi, skb,
157                                  vlan_gro_common(napi, grp, vlan_tci, skb));
158 }
159 EXPORT_SYMBOL(vlan_gro_frags);