bbs.cooldavid.org Git - net-next-2.6.git/commitdiff
vlan: lockless transmit path
author     Eric Dumazet <eric.dumazet@gmail.com>
           Wed, 10 Nov 2010 23:42:00 +0000 (23:42 +0000)
committer  David S. Miller <davem@davemloft.net>
           Tue, 16 Nov 2010 19:15:08 +0000 (11:15 -0800)
vlan is a stacked device, like tunnels. We should use the same lockless
mechanism we already use for tunnels and loopback.

This patch completely removes locking in the TX path.
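
A minimal sketch of what "lockless" means here (hypothetical example_* names, not code from this patch): with NETIF_F_LLTX set, the core calls ndo_start_xmit() without taking the per-queue TX lock, so a stacked driver keeps its per-packet accounting in per-cpu counters guarded only by a u64_stats_sync sequence and simply re-queues the skb to the lower device.

/*
 * Hedged sketch, not the vlan code itself; "example_*" names are invented.
 */
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/percpu.h>
#include <linux/u64_stats_sync.h>

struct example_pcpu_stats {
	u64			tx_packets;
	u64			tx_bytes;
	struct u64_stats_sync	syncp;
	u32			tx_dropped;	/* u32, bumped with this_cpu_inc() */
};

struct example_priv {
	struct net_device *lower_dev;	/* the real device we stack on */
	struct example_pcpu_stats __percpu *pcpu_stats;
};

static int example_dev_init(struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);

	dev->features |= NETIF_F_LLTX;	/* core skips the TX queue lock around xmit */
	priv->pcpu_stats = alloc_percpu(struct example_pcpu_stats);
	return priv->pcpu_stats ? 0 : -ENOMEM;
}

static netdev_tx_t example_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct example_priv *priv = netdev_priv(dev);
	unsigned int len = skb->len;	/* read before the skb is handed off */
	int ret;

	skb->dev = priv->lower_dev;	/* re-queue to the underlying real device */
	ret = dev_queue_xmit(skb);

	if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
		struct example_pcpu_stats *stats = this_cpu_ptr(priv->pcpu_stats);

		u64_stats_update_begin(&stats->syncp);
		stats->tx_packets++;
		stats->tx_bytes += len;
		u64_stats_update_end(&stats->syncp);
	} else {
		this_cpu_inc(priv->pcpu_stats->tx_dropped);
	}
	return ret;
}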

tx stat counters are added to the existing percpu stat structure, which is
renamed from vlan_rx_stats to vlan_pcpu_stats.
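
A matching hedged sketch of the read side (same invented example_* names): ndo_get_stats64() folds the per-cpu counters into rtnl_link_stats64, retrying a CPU's snapshot if a writer raced with it (needed because 64-bit counters cannot be read atomically on 32-bit); the u32 drop counter is summed outside the sequence, as the patch does for rx_errors and tx_dropped.

/* Continues the sketch above; not the vlan code itself. */
static struct rtnl_link_stats64 *example_get_stats64(struct net_device *dev,
						     struct rtnl_link_stats64 *stats)
{
	struct example_priv *priv = netdev_priv(dev);
	u32 tx_dropped = 0;
	int i;

	for_each_possible_cpu(i) {
		struct example_pcpu_stats *p = per_cpu_ptr(priv->pcpu_stats, i);
		u64 tx_packets, tx_bytes;
		unsigned int start;

		do {
			start = u64_stats_fetch_begin_bh(&p->syncp);
			tx_packets = p->tx_packets;
			tx_bytes   = p->tx_bytes;
		} while (u64_stats_fetch_retry_bh(&p->syncp, start));

		stats->tx_packets += tx_packets;
		stats->tx_bytes   += tx_bytes;
		tx_dropped        += p->tx_dropped;	/* plain u32, read outside the sequence */
	}
	stats->tx_dropped = tx_dropped;
	return stats;
}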

Note: this partially reverts commit 2e59af3dcbdf (vlan: multiqueue vlan
device)

Signed-off-by: Eric Dumazet <eric.dumazet@gmail.com>
Cc: Patrick McHardy <kaber@trash.net>
Signed-off-by: David S. Miller <davem@davemloft.net>
net/8021q/vlan.c
net/8021q/vlan.h
net/8021q/vlan_core.c
net/8021q/vlan_dev.c
net/8021q/vlan_netlink.c

diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
index 55d2135889fc57616191e9322da005a9149a07e4..dc1071327d87a929893a8b02b8cfbf69e80dc562 100644
--- a/net/8021q/vlan.c
+++ b/net/8021q/vlan.c
@@ -272,13 +272,11 @@ static int register_vlan_device(struct net_device *real_dev, u16 vlan_id)
                snprintf(name, IFNAMSIZ, "vlan%.4i", vlan_id);
        }
 
-       new_dev = alloc_netdev_mq(sizeof(struct vlan_dev_info), name,
-                                 vlan_setup, real_dev->num_tx_queues);
+       new_dev = alloc_netdev(sizeof(struct vlan_dev_info), name, vlan_setup);
 
        if (new_dev == NULL)
                return -ENOBUFS;
 
-       netif_copy_real_num_queues(new_dev, real_dev);
        dev_net_set(new_dev, net);
        /* need 4 bytes for extra VLAN header info,
         * hope the underlying device can handle it.
diff --git a/net/8021q/vlan.h b/net/8021q/vlan.h
index 4625ba64dfdcce10173c6047bef7393c1fbd4d96..5687c9b95f33b7f70234e0d3ef7220fa5f31f1b1 100644
--- a/net/8021q/vlan.h
+++ b/net/8021q/vlan.h
@@ -19,19 +19,25 @@ struct vlan_priority_tci_mapping {
 
 
 /**
- *     struct vlan_rx_stats - VLAN percpu rx stats
+ *     struct vlan_pcpu_stats - VLAN percpu rx/tx stats
  *     @rx_packets: number of received packets
  *     @rx_bytes: number of received bytes
  *     @rx_multicast: number of received multicast packets
+ *     @tx_packets: number of transmitted packets
+ *     @tx_bytes: number of transmitted bytes
  *     @syncp: synchronization point for 64bit counters
- *     @rx_errors: number of errors
+ *     @rx_errors: number of rx errors
+ *     @tx_dropped: number of tx drops
  */
-struct vlan_rx_stats {
+struct vlan_pcpu_stats {
        u64                     rx_packets;
        u64                     rx_bytes;
        u64                     rx_multicast;
+       u64                     tx_packets;
+       u64                     tx_bytes;
        struct u64_stats_sync   syncp;
-       unsigned long           rx_errors;
+       u32                     rx_errors;
+       u32                     tx_dropped;
 };
 
 /**
@@ -45,7 +51,7 @@ struct vlan_rx_stats {
  *     @real_dev: underlying netdevice
  *     @real_dev_addr: address of underlying netdevice
  *     @dent: proc dir entry
- *     @vlan_rx_stats: ptr to percpu rx stats
+ *     @vlan_pcpu_stats: ptr to percpu rx stats
  */
 struct vlan_dev_info {
        unsigned int                            nr_ingress_mappings;
@@ -60,7 +66,7 @@ struct vlan_dev_info {
        unsigned char                           real_dev_addr[ETH_ALEN];
 
        struct proc_dir_entry                   *dent;
-       struct vlan_rx_stats __percpu           *vlan_rx_stats;
+       struct vlan_pcpu_stats __percpu         *vlan_pcpu_stats;
 };
 
 static inline struct vlan_dev_info *vlan_dev_info(const struct net_device *dev)
diff --git a/net/8021q/vlan_core.c b/net/8021q/vlan_core.c
index 69b2f79800a52c3e07e0f7c668daa1028025931e..ce8e3ab3e7a5ab6d0f0000e699fbca27f4629d9b 100644
--- a/net/8021q/vlan_core.c
+++ b/net/8021q/vlan_core.c
@@ -9,7 +9,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
        struct sk_buff *skb = *skbp;
        u16 vlan_id = skb->vlan_tci & VLAN_VID_MASK;
        struct net_device *vlan_dev;
-       struct vlan_rx_stats *rx_stats;
+       struct vlan_pcpu_stats *rx_stats;
 
        vlan_dev = vlan_find_dev(skb->dev, vlan_id);
        if (!vlan_dev) {
@@ -26,7 +26,7 @@ bool vlan_hwaccel_do_receive(struct sk_buff **skbp)
        skb->priority = vlan_get_ingress_priority(vlan_dev, skb->vlan_tci);
        skb->vlan_tci = 0;
 
-       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_rx_stats);
+       rx_stats = this_cpu_ptr(vlan_dev_info(vlan_dev)->vlan_pcpu_stats);
 
        u64_stats_update_begin(&rx_stats->syncp);
        rx_stats->rx_packets++;
diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c
index f3c9552f6ba82757a72be9c6d8abd162c861094a..2fa3f4a3f60f07bc9982a2828f16bff4285256ed 100644
--- a/net/8021q/vlan_dev.c
+++ b/net/8021q/vlan_dev.c
@@ -141,7 +141,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
                  struct packet_type *ptype, struct net_device *orig_dev)
 {
        struct vlan_hdr *vhdr;
-       struct vlan_rx_stats *rx_stats;
+       struct vlan_pcpu_stats *rx_stats;
        struct net_device *vlan_dev;
        u16 vlan_id;
        u16 vlan_tci;
@@ -177,7 +177,7 @@ int vlan_skb_recv(struct sk_buff *skb, struct net_device *dev,
        } else {
                skb->dev = vlan_dev;
 
-               rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_rx_stats);
+               rx_stats = this_cpu_ptr(vlan_dev_info(skb->dev)->vlan_pcpu_stats);
 
                u64_stats_update_begin(&rx_stats->syncp);
                rx_stats->rx_packets++;
@@ -310,8 +310,6 @@ static int vlan_dev_hard_header(struct sk_buff *skb, struct net_device *dev,
 static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
                                            struct net_device *dev)
 {
-       int i = skb_get_queue_mapping(skb);
-       struct netdev_queue *txq = netdev_get_tx_queue(dev, i);
        struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data);
        unsigned int len;
        int ret;
@@ -334,10 +332,16 @@ static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb,
        ret = dev_queue_xmit(skb);
 
        if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) {
-               txq->tx_packets++;
-               txq->tx_bytes += len;
-       } else
-               txq->tx_dropped++;
+               struct vlan_pcpu_stats *stats;
+
+               stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats);
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_packets++;
+               stats->tx_bytes += len;
+               u64_stats_update_end(&stats->syncp);
+       } else {
+               this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped);
+       }
 
        return ret;
 }
@@ -696,6 +700,7 @@ static int vlan_dev_init(struct net_device *dev)
                      (1<<__LINK_STATE_PRESENT);
 
        dev->features |= real_dev->features & real_dev->vlan_features;
+       dev->features |= NETIF_F_LLTX;
        dev->gso_max_size = real_dev->gso_max_size;
 
        /* ipv6 shared card related stuff */
@@ -728,8 +733,8 @@ static int vlan_dev_init(struct net_device *dev)
 
        vlan_dev_set_lockdep_class(dev, subclass);
 
-       vlan_dev_info(dev)->vlan_rx_stats = alloc_percpu(struct vlan_rx_stats);
-       if (!vlan_dev_info(dev)->vlan_rx_stats)
+       vlan_dev_info(dev)->vlan_pcpu_stats = alloc_percpu(struct vlan_pcpu_stats);
+       if (!vlan_dev_info(dev)->vlan_pcpu_stats)
                return -ENOMEM;
 
        return 0;
@@ -741,8 +746,8 @@ static void vlan_dev_uninit(struct net_device *dev)
        struct vlan_dev_info *vlan = vlan_dev_info(dev);
        int i;
 
-       free_percpu(vlan->vlan_rx_stats);
-       vlan->vlan_rx_stats = NULL;
+       free_percpu(vlan->vlan_pcpu_stats);
+       vlan->vlan_pcpu_stats = NULL;
        for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) {
                while ((pm = vlan->egress_priority_map[i]) != NULL) {
                        vlan->egress_priority_map[i] = pm->next;
@@ -780,33 +785,37 @@ static u32 vlan_ethtool_get_flags(struct net_device *dev)
 
 static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
 {
-       dev_txq_stats_fold(dev, stats);
 
-       if (vlan_dev_info(dev)->vlan_rx_stats) {
-               struct vlan_rx_stats *p, accum = {0};
+       if (vlan_dev_info(dev)->vlan_pcpu_stats) {
+               struct vlan_pcpu_stats *p;
+               u32 rx_errors = 0, tx_dropped = 0;
                int i;
 
                for_each_possible_cpu(i) {
-                       u64 rxpackets, rxbytes, rxmulticast;
+                       u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes;
                        unsigned int start;
 
-                       p = per_cpu_ptr(vlan_dev_info(dev)->vlan_rx_stats, i);
+                       p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i);
                        do {
                                start = u64_stats_fetch_begin_bh(&p->syncp);
                                rxpackets       = p->rx_packets;
                                rxbytes         = p->rx_bytes;
                                rxmulticast     = p->rx_multicast;
+                               txpackets       = p->tx_packets;
+                               txbytes         = p->tx_bytes;
                        } while (u64_stats_fetch_retry_bh(&p->syncp, start));
-                       accum.rx_packets += rxpackets;
-                       accum.rx_bytes   += rxbytes;
-                       accum.rx_multicast += rxmulticast;
-                       /* rx_errors is ulong, not protected by syncp */
-                       accum.rx_errors  += p->rx_errors;
+
+                       stats->rx_packets       += rxpackets;
+                       stats->rx_bytes         += rxbytes;
+                       stats->multicast        += rxmulticast;
+                       stats->tx_packets       += txpackets;
+                       stats->tx_bytes         += txbytes;
+                       /* rx_errors & tx_dropped are u32 */
+                       rx_errors       += p->rx_errors;
+                       tx_dropped      += p->tx_dropped;
                }
-               stats->rx_packets = accum.rx_packets;
-               stats->rx_bytes   = accum.rx_bytes;
-               stats->rx_errors  = accum.rx_errors;
-               stats->multicast  = accum.rx_multicast;
+               stats->rx_errors  = rx_errors;
+               stats->tx_dropped = tx_dropped;
        }
        return stats;
 }
diff --git a/net/8021q/vlan_netlink.c b/net/8021q/vlan_netlink.c
index ddc105734af7ae5664d3199ba8f4467f53a84175..be9a5c19a775aa64751b93795ccd5df1a453297a 100644
--- a/net/8021q/vlan_netlink.c
+++ b/net/8021q/vlan_netlink.c
@@ -101,25 +101,6 @@ static int vlan_changelink(struct net_device *dev,
        return 0;
 }
 
-static int vlan_get_tx_queues(struct net *net,
-                             struct nlattr *tb[],
-                             unsigned int *num_tx_queues,
-                             unsigned int *real_num_tx_queues)
-{
-       struct net_device *real_dev;
-
-       if (!tb[IFLA_LINK])
-               return -EINVAL;
-
-       real_dev = __dev_get_by_index(net, nla_get_u32(tb[IFLA_LINK]));
-       if (!real_dev)
-               return -ENODEV;
-
-       *num_tx_queues      = real_dev->num_tx_queues;
-       *real_num_tx_queues = real_dev->real_num_tx_queues;
-       return 0;
-}
-
 static int vlan_newlink(struct net *src_net, struct net_device *dev,
                        struct nlattr *tb[], struct nlattr *data[])
 {
@@ -237,7 +218,6 @@ struct rtnl_link_ops vlan_link_ops __read_mostly = {
        .maxtype        = IFLA_VLAN_MAX,
        .policy         = vlan_policy,
        .priv_size      = sizeof(struct vlan_dev_info),
-       .get_tx_queues  = vlan_get_tx_queues,
        .setup          = vlan_setup,
        .validate       = vlan_validate,
        .newlink        = vlan_newlink,