xps: Improvements in TX queue selection
diff --git a/net/core/dev.c b/net/core/dev.c
index 0b403d5033111cf4a9de5e865f6949f84c4e2343..7b17674a29ec77f18247ddd0cc498d93c59ce85f 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1794,16 +1794,18 @@ struct sk_buff *skb_gso_segment(struct sk_buff *skb, int features)
        struct sk_buff *segs = ERR_PTR(-EPROTONOSUPPORT);
        struct packet_type *ptype;
        __be16 type = skb->protocol;
+       int vlan_depth = ETH_HLEN;
        int err;
 
-       if (type == htons(ETH_P_8021Q)) {
-               struct vlan_ethhdr *veh;
+       while (type == htons(ETH_P_8021Q)) {
+               struct vlan_hdr *vh;
 
-               if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN)))
+               if (unlikely(!pskb_may_pull(skb, vlan_depth + VLAN_HLEN)))
                        return ERR_PTR(-EINVAL);
 
-               veh = (struct vlan_ethhdr *)skb->data;
-               type = veh->h_vlan_encapsulated_proto;
+               vh = (struct vlan_hdr *)(skb->data + vlan_depth);
+               type = vh->h_vlan_encapsulated_proto;
+               vlan_depth += VLAN_HLEN;
        }
 
        skb_reset_mac_header(skb);
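
The reworked header walk above no longer assumes at most one VLAN tag: it advances vlan_depth by VLAN_HLEN per iteration until the encapsulated EtherType is no longer 802.1Q, pulling just enough header each time. A minimal userspace sketch of the same walk, assuming a flat frame buffer and locally defined constants (ETH_HDR_LEN, VLAN_TAG_LEN, ETHERTYPE_VLAN mirror ETH_HLEN, VLAN_HLEN and ETH_P_8021Q; a plain length check stands in for pskb_may_pull()):

#include <stdint.h>
#include <stddef.h>
#include <string.h>
#include <arpa/inet.h>          /* ntohs() */

#define ETH_HDR_LEN    14       /* assumed, mirrors ETH_HLEN */
#define VLAN_TAG_LEN    4       /* assumed, mirrors VLAN_HLEN */
#define ETHERTYPE_VLAN 0x8100   /* assumed, mirrors ETH_P_8021Q */

/* Read a big-endian 16-bit field without an unaligned access. */
static uint16_t load_be16(const uint8_t *p)
{
	uint16_t v;

	memcpy(&v, p, sizeof(v));
	return ntohs(v);
}

/*
 * Walk nested 802.1Q headers and return the innermost EtherType plus the
 * offset where the L3 header starts.  Same loop shape as the hunk above;
 * returns 0 if the frame is too short (the pskb_may_pull() failure case).
 */
static uint16_t inner_ethertype(const uint8_t *frame, size_t len, size_t *l3_off)
{
	size_t depth = ETH_HDR_LEN;
	uint16_t type;

	if (len < ETH_HDR_LEN)
		return 0;
	type = load_be16(frame + 12);           /* EtherType / outer TPID */

	while (type == ETHERTYPE_VLAN) {
		if (len < depth + VLAN_TAG_LEN)
			return 0;
		/* h_vlan_encapsulated_proto sits 2 bytes into the VLAN header. */
		type = load_be16(frame + depth + 2);
		depth += VLAN_TAG_LEN;
	}
	*l3_off = depth;
	return type;
}
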
@@ -1966,6 +1968,23 @@ static inline void skb_orphan_try(struct sk_buff *skb)
        }
 }
 
+int netif_get_vlan_features(struct sk_buff *skb, struct net_device *dev)
+{
+       __be16 protocol = skb->protocol;
+
+       if (protocol == htons(ETH_P_8021Q)) {
+               struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data;
+               protocol = veh->h_vlan_encapsulated_proto;
+       } else if (!skb->vlan_tci)
+               return dev->features;
+
+       if (protocol != htons(ETH_P_8021Q))
+               return dev->features & dev->vlan_features;
+       else
+               return 0;
+}
+EXPORT_SYMBOL(netif_get_vlan_features);
+
 /*
  * Returns true if either:
  *     1. skb has frag_list and the device doesn't support FRAGLIST, or
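
The netif_get_vlan_features() helper added above picks the feature set usable for a given skb: the full dev->features when the frame carries no VLAN tag at all, dev->features & dev->vlan_features when one level of tagging is visible (in-band in skb->protocol or out-of-band in skb->vlan_tci), and 0 when the EtherType behind the first in-band tag is again 802.1Q, which pushes the stack into software fallbacks. A hedged userspace model of that decision, with plain integers standing in for the feature bitmasks and reduced structs standing in for the skb and device (field names here are illustrative, not kernel API):

#include <stdint.h>
#include <stdbool.h>

#define ETHERTYPE_VLAN 0x8100u  /* assumed, mirrors ETH_P_8021Q */

/* Only the fields this decision needs. */
struct model_dev {
	uint32_t features;
	uint32_t vlan_features;
};

struct model_skb {
	uint16_t protocol;      /* EtherType as seen by the stack */
	uint16_t inband_proto;  /* EtherType behind an in-band tag, if any */
	bool     hw_accel_tag;  /* out-of-band tag, like a non-zero skb->vlan_tci */
};

/*
 * Mirrors the branch structure of netif_get_vlan_features(): untagged
 * frames keep every feature, a single tag masks them with vlan_features,
 * stacked in-band tags disable hardware offloads entirely.
 */
static uint32_t usable_features(const struct model_skb *skb,
				const struct model_dev *dev)
{
	uint16_t protocol = skb->protocol;

	if (protocol == ETHERTYPE_VLAN)
		protocol = skb->inband_proto;   /* look past the in-band tag */
	else if (!skb->hw_accel_tag)
		return dev->features;           /* untagged frame */

	if (protocol != ETHERTYPE_VLAN)
		return dev->features & dev->vlan_features;
	return 0;                               /* second 802.1Q header */
}
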
@@ -2129,20 +2148,24 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
        int queue_index;
        const struct net_device_ops *ops = dev->netdev_ops;
 
-       if (ops->ndo_select_queue) {
+       if (dev->real_num_tx_queues == 1)
+               queue_index = 0;
+       else if (ops->ndo_select_queue) {
                queue_index = ops->ndo_select_queue(dev, skb);
                queue_index = dev_cap_txqueue(dev, queue_index);
        } else {
                struct sock *sk = skb->sk;
                queue_index = sk_tx_queue_get(sk);
-               if (queue_index < 0 || queue_index >= dev->real_num_tx_queues) {
 
-                       queue_index = 0;
-                       if (dev->real_num_tx_queues > 1)
-                               queue_index = skb_tx_hash(dev, skb);
+               if (queue_index < 0 || skb->ooo_okay ||
+                   queue_index >= dev->real_num_tx_queues) {
+                       int old_index = queue_index;
+
+                       queue_index = skb_tx_hash(dev, skb);
 
-                       if (sk) {
-                               struct dst_entry *dst = rcu_dereference_check(sk->sk_dst_cache, 1);
+                       if (queue_index != old_index && sk) {
+                               struct dst_entry *dst =
+                                   rcu_dereference_check(sk->sk_dst_cache, 1);
 
                                if (dst && skb_dst(skb) == dst)
                                        sk_tx_queue_set(sk, queue_index);
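
The dev_pick_tx() change above does two things: single-queue devices skip the selection work entirely, and a socket's cached queue is bypassed when the skb is flagged ooo_okay (out-of-order delivery acceptable), letting the flow be rehashed onto a possibly different queue; the cache is refreshed only if the hash actually moved the queue and the socket's dst still matches the skb's. A compact userspace model of that ordering, with illustrative types, a placeholder hash, and the dst comparison omitted:

#include <stdint.h>
#include <stdbool.h>

/* Illustrative stand-ins for the pieces dev_pick_tx() consults. */
struct txdev {
	unsigned int real_num_tx_queues;
	/* optional driver-provided selector, like ndo_select_queue */
	int (*select_queue)(struct txdev *dev, const void *skb);
};

struct txsock {
	int cached_queue;       /* like sk_tx_queue_get()/sk_tx_queue_set() */
};

/* Placeholder for skb_tx_hash(): any value below real_num_tx_queues. */
static unsigned int tx_hash(const struct txdev *dev, const void *skb)
{
	return (unsigned int)((uintptr_t)skb >> 4) % dev->real_num_tx_queues;
}

static unsigned int pick_tx_queue(struct txdev *dev, struct txsock *sk,
				  const void *skb, bool ooo_okay)
{
	int queue_index;

	if (dev->real_num_tx_queues == 1)
		return 0;                        /* new short-circuit */

	if (dev->select_queue) {
		int q = dev->select_queue(dev, skb);

		/* crude stand-in for dev_cap_txqueue() */
		if (q < 0 || q >= (int)dev->real_num_tx_queues)
			q = 0;
		return (unsigned int)q;
	}

	queue_index = sk ? sk->cached_queue : -1;
	if (queue_index < 0 || ooo_okay ||
	    queue_index >= (int)dev->real_num_tx_queues) {
		int old_index = queue_index;

		queue_index = (int)tx_hash(dev, skb);
		/* Only refresh the cache when the queue actually changed
		 * (the kernel additionally requires the socket dst to match). */
		if (queue_index != old_index && sk)
			sk->cached_queue = queue_index;
	}
	return (unsigned int)queue_index;
}
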
@@ -5033,12 +5056,8 @@ static int netif_alloc_rx_queues(struct net_device *dev)
        }
        dev->_rx = rx;
 
-       /*
-        * Set a pointer to first element in the array which holds the
-        * reference count.
-        */
        for (i = 0; i < count; i++)
-               rx[i].first = rx;
+               rx[i].dev = dev;
 #endif
        return 0;
 }
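
Each netdev_rx_queue used to carry a pointer to the first element of the queue array, which held the reference count; after this hunk it carries a back-pointer to its owning net_device instead, so code holding only a queue pointer can reach the device directly. A tiny sketch of that back-pointer pattern under assumed, simplified types:

#include <stdlib.h>

struct mini_dev;

struct mini_rx_queue {
	struct mini_dev *dev;   /* like netdev_rx_queue.dev after this change */
	unsigned int index;
};

struct mini_dev {
	struct mini_rx_queue *rx;
	unsigned int num_rx_queues;
};

/* Allocate the queue array and point every element back at its device,
 * mirroring the rx[i].dev = dev loop above. */
static int alloc_rx_queues(struct mini_dev *dev, unsigned int count)
{
	unsigned int i;

	dev->rx = calloc(count, sizeof(*dev->rx));
	if (!dev->rx)
		return -1;
	dev->num_rx_queues = count;
	for (i = 0; i < count; i++) {
		dev->rx[i].dev = dev;
		dev->rx[i].index = i;
	}
	return 0;
}

/* A context that only has the queue can still find its device. */
static struct mini_dev *rx_queue_to_dev(const struct mini_rx_queue *q)
{
	return q->dev;
}
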
@@ -5114,14 +5133,6 @@ int register_netdevice(struct net_device *dev)
 
        dev->iflink = -1;
 
-       ret = netif_alloc_rx_queues(dev);
-       if (ret)
-               goto out;
-
-       ret = netif_alloc_netdev_queues(dev);
-       if (ret)
-               goto out;
-
        netdev_init_queues(dev);
 
        /* Init, if this function is available */
@@ -5581,10 +5592,14 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
        dev->num_tx_queues = queue_count;
        dev->real_num_tx_queues = queue_count;
+       if (netif_alloc_netdev_queues(dev))
+               goto free_pcpu;
 
 #ifdef CONFIG_RPS
        dev->num_rx_queues = queue_count;
        dev->real_num_rx_queues = queue_count;
+       if (netif_alloc_rx_queues(dev))
+               goto free_pcpu;
 #endif
 
        dev->gso_max_size = GSO_MAX_SIZE;
@@ -5601,6 +5616,11 @@ struct net_device *alloc_netdev_mq(int sizeof_priv, const char *name,
 
 free_pcpu:
        free_percpu(dev->pcpu_refcnt);
+       kfree(dev->_tx);
+#ifdef CONFIG_RPS
+       kfree(dev->_rx);
+#endif
+
 free_p:
        kfree(p);
        return NULL;
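
This hunk and the two before it (register_netdevice() around 5114 and alloc_netdev_mq() around 5581) move the TX and RX queue-array allocation out of registration and into device allocation, so the free_pcpu error label, and free_netdev() in the next hunk, now have to release those arrays as well; kfree(NULL) is a no-op, so the unwinding stays safe when only one array was allocated. A hedged userspace sketch of the same construct-and-unwind pattern, with illustrative names and calloc()/free() standing in for the kernel allocators:

#include <stdlib.h>

struct mini_netdev {
	void *pcpu_refcnt;
	void *tx_queues;
	void *rx_queues;
};

/*
 * Allocate everything the device needs up front and unwind through labels
 * on failure, the same shape as the updated alloc_netdev_mq() error path.
 * free(NULL) is a no-op, so a partially completed allocation is handled.
 */
static struct mini_netdev *mini_alloc_netdev(unsigned int queue_count)
{
	struct mini_netdev *dev;

	dev = calloc(1, sizeof(*dev));
	if (!dev)
		return NULL;

	dev->pcpu_refcnt = calloc(1, 64);            /* stand-in for alloc_percpu() */
	if (!dev->pcpu_refcnt)
		goto free_dev;

	dev->tx_queues = calloc(queue_count, 128);   /* netif_alloc_netdev_queues() */
	if (!dev->tx_queues)
		goto free_pcpu;

	dev->rx_queues = calloc(queue_count, 64);    /* netif_alloc_rx_queues() */
	if (!dev->rx_queues)
		goto free_pcpu;

	return dev;

free_pcpu:
	free(dev->pcpu_refcnt);
	free(dev->tx_queues);
	free(dev->rx_queues);
free_dev:
	free(dev);
	return NULL;
}
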
@@ -5622,6 +5642,9 @@ void free_netdev(struct net_device *dev)
        release_net(dev_net(dev));
 
        kfree(dev->_tx);
+#ifdef CONFIG_RPS
+       kfree(dev->_rx);
+#endif
 
        kfree(rcu_dereference_raw(dev->ingress_queue));