bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - net/core/dev.c
Merge branch 'master' of master.kernel.org:/pub/scm/linux/kernel/git/davem/net-2.6
[net-next-2.6.git] / net / core / dev.c
index c1dc8a95f6ffc2de41a3cda887e2a2434496a89a..fc2dc933bee5b6053cd0ee8c92646ec1fb143714 100644 (file)
@@ -371,6 +371,14 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
  *                                                     --ANK (980803)
  */
 
+static inline struct list_head *ptype_head(const struct packet_type *pt)
+{
+       if (pt->type == htons(ETH_P_ALL))
+               return &ptype_all;
+       else
+               return &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+}
+
 /**
  *     dev_add_pack - add packet handler
  *     @pt: packet type declaration
@@ -386,16 +394,11 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 
 void dev_add_pack(struct packet_type *pt)
 {
-       int hash;
+       struct list_head *head = ptype_head(pt);
 
-       spin_lock_bh(&ptype_lock);
-       if (pt->type == htons(ETH_P_ALL))
-               list_add_rcu(&pt->list, &ptype_all);
-       else {
-               hash = ntohs(pt->type) & PTYPE_HASH_MASK;
-               list_add_rcu(&pt->list, &ptype_base[hash]);
-       }
-       spin_unlock_bh(&ptype_lock);
+       spin_lock(&ptype_lock);
+       list_add_rcu(&pt->list, head);
+       spin_unlock(&ptype_lock);
 }
 EXPORT_SYMBOL(dev_add_pack);
 
@@ -414,15 +417,10 @@ EXPORT_SYMBOL(dev_add_pack);
  */
 void __dev_remove_pack(struct packet_type *pt)
 {
-       struct list_head *head;
+       struct list_head *head = ptype_head(pt);
        struct packet_type *pt1;
 
-       spin_lock_bh(&ptype_lock);
-
-       if (pt->type == htons(ETH_P_ALL))
-               head = &ptype_all;
-       else
-               head = &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
+       spin_lock(&ptype_lock);
 
        list_for_each_entry(pt1, head, list) {
                if (pt == pt1) {
@@ -433,7 +431,7 @@ void __dev_remove_pack(struct packet_type *pt)
 
        printk(KERN_WARNING "dev_remove_pack: %p not found.\n", pt);
 out:
-       spin_unlock_bh(&ptype_lock);
+       spin_unlock(&ptype_lock);
 }
 EXPORT_SYMBOL(__dev_remove_pack);
 
@@ -1930,7 +1928,7 @@ static inline int skb_needs_linearize(struct sk_buff *skb,
                                      struct net_device *dev)
 {
        return skb_is_nonlinear(skb) &&
-              ((skb_has_frags(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
+              ((skb_has_frag_list(skb) && !(dev->features & NETIF_F_FRAGLIST)) ||
                (skb_shinfo(skb)->nr_frags && (!(dev->features & NETIF_F_SG) ||
                                              illegal_highdma(dev, skb))));
 }
@@ -2058,16 +2056,16 @@ static struct netdev_queue *dev_pick_tx(struct net_device *dev,
                                        struct sk_buff *skb)
 {
        int queue_index;
-       struct sock *sk = skb->sk;
+       const struct net_device_ops *ops = dev->netdev_ops;
 
-       queue_index = sk_tx_queue_get(sk);
-       if (queue_index < 0) {
-               const struct net_device_ops *ops = dev->netdev_ops;
+       if (ops->ndo_select_queue) {
+               queue_index = ops->ndo_select_queue(dev, skb);
+               queue_index = dev_cap_txqueue(dev, queue_index);
+       } else {
+               struct sock *sk = skb->sk;
+               queue_index = sk_tx_queue_get(sk);
+               if (queue_index < 0) {
 
-               if (ops->ndo_select_queue) {
-                       queue_index = ops->ndo_select_queue(dev, skb);
-                       queue_index = dev_cap_txqueue(dev, queue_index);
-               } else {
                        queue_index = 0;
                        if (dev->real_num_tx_queues > 1)
                                queue_index = skb_tx_hash(dev, skb);
@@ -2266,7 +2264,7 @@ static inline void ____napi_schedule(struct softnet_data *sd,
  */
 __u32 __skb_get_rxhash(struct sk_buff *skb)
 {
-       int nhoff, hash = 0;
+       int nhoff, hash = 0, poff;
        struct ipv6hdr *ip6;
        struct iphdr *ip;
        u8 ip_proto;
@@ -2283,8 +2281,11 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
                if (!pskb_may_pull(skb, sizeof(*ip) + nhoff))
                        goto done;
 
-               ip = (struct iphdr *) skb->data + nhoff;
-               ip_proto = ip->protocol;
+               ip = (struct iphdr *) (skb->data + nhoff);
+               if (ip->frag_off & htons(IP_MF | IP_OFFSET))
+                       ip_proto = 0;
+               else
+                       ip_proto = ip->protocol;
                addr1 = (__force u32) ip->saddr;
                addr2 = (__force u32) ip->daddr;
                ihl = ip->ihl;
@@ -2293,7 +2294,7 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
                if (!pskb_may_pull(skb, sizeof(*ip6) + nhoff))
                        goto done;
 
-               ip6 = (struct ipv6hdr *) skb->data + nhoff;
+               ip6 = (struct ipv6hdr *) (skb->data + nhoff);
                ip_proto = ip6->nexthdr;
                addr1 = (__force u32) ip6->saddr.s6_addr32[3];
                addr2 = (__force u32) ip6->daddr.s6_addr32[3];
@@ -2303,24 +2304,15 @@ __u32 __skb_get_rxhash(struct sk_buff *skb)
                goto done;
        }
 
-       switch (ip_proto) {
-       case IPPROTO_TCP:
-       case IPPROTO_UDP:
-       case IPPROTO_DCCP:
-       case IPPROTO_ESP:
-       case IPPROTO_AH:
-       case IPPROTO_SCTP:
-       case IPPROTO_UDPLITE:
-               if (pskb_may_pull(skb, (ihl * 4) + 4 + nhoff)) {
-                       ports.v32 = * (__force u32 *) (skb->data + nhoff +
-                                                      (ihl * 4));
+       ports.v32 = 0;
+       poff = proto_ports_offset(ip_proto);
+       if (poff >= 0) {
+               nhoff += ihl * 4 + poff;
+               if (pskb_may_pull(skb, nhoff + 4)) {
+                       ports.v32 = * (__force u32 *) (skb->data + nhoff);
                        if (ports.v16[1] < ports.v16[0])
                                swap(ports.v16[0], ports.v16[1]);
-                       break;
                }
-       default:
-               ports.v32 = 0;
-               break;
        }
 
        /* get a consistent hash (same value on both flow directions) */
@@ -2351,7 +2343,7 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                       struct rps_dev_flow **rflowp)
 {
        struct netdev_rx_queue *rxqueue;
-       struct rps_map *map;
+       struct rps_map *map = NULL;
        struct rps_dev_flow_table *flow_table;
        struct rps_sock_flow_table *sock_flow_table;
        int cpu = -1;
@@ -2369,9 +2361,19 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
        } else
                rxqueue = dev->_rx;
 
-       if (!rxqueue->rps_map && !rxqueue->rps_flow_table)
+       if (rxqueue->rps_map) {
+               map = rcu_dereference(rxqueue->rps_map);
+               if (map && map->len == 1) {
+                       tcpu = map->cpus[0];
+                       if (cpu_online(tcpu))
+                               cpu = tcpu;
+                       goto done;
+               }
+       } else if (!rxqueue->rps_flow_table) {
                goto done;
+       }
 
+       skb_reset_network_header(skb);
        if (!skb_get_rxhash(skb))
                goto done;
 
@@ -2414,7 +2416,6 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb,
                }
        }
 
-       map = rcu_dereference(rxqueue->rps_map);
        if (map) {
                tcpu = map->cpus[((u64) skb->rxhash * map->len) >> 32];
 
@@ -2846,8 +2847,8 @@ static int __netif_receive_skb(struct sk_buff *skb)
        if (!netdev_tstamp_prequeue)
                net_timestamp_check(skb);
 
-       if (vlan_tx_tag_present(skb) && vlan_hwaccel_do_receive(skb))
-               return NET_RX_SUCCESS;
+       if (vlan_tx_tag_present(skb))
+               vlan_hwaccel_do_receive(skb);
 
        /* if we've gotten here through NAPI, check netpoll */
        if (netpoll_receive_skb(skb))
@@ -3068,7 +3069,7 @@ out:
        return netif_receive_skb(skb);
 }
 
-static void napi_gro_flush(struct napi_struct *napi)
+inline void napi_gro_flush(struct napi_struct *napi)
 {
        struct sk_buff *skb, *next;
 
@@ -3081,6 +3082,7 @@ static void napi_gro_flush(struct napi_struct *napi)
        napi->gro_count = 0;
        napi->gro_list = NULL;
 }
+EXPORT_SYMBOL(napi_gro_flush);
 
 enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
@@ -3095,7 +3097,7 @@ enum gro_result dev_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
        if (!(skb->dev->features & NETIF_F_GRO) || netpoll_rx_on(skb))
                goto normal;
 
-       if (skb_is_gso(skb) || skb_has_frags(skb))
+       if (skb_is_gso(skb) || skb_has_frag_list(skb))
                goto normal;
 
        rcu_read_lock();
@@ -3161,7 +3163,7 @@ pull:
                        put_page(skb_shinfo(skb)->frags[0].page);
                        memmove(skb_shinfo(skb)->frags,
                                skb_shinfo(skb)->frags + 1,
-                               --skb_shinfo(skb)->nr_frags);
+                               --skb_shinfo(skb)->nr_frags * sizeof(skb_frag_t));
                }
        }
 
@@ -3174,16 +3176,18 @@ normal:
 }
 EXPORT_SYMBOL(dev_gro_receive);
 
-static gro_result_t
+static inline gro_result_t
 __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb)
 {
        struct sk_buff *p;
 
        for (p = napi->gro_list; p; p = p->next) {
-               NAPI_GRO_CB(p)->same_flow =
-                       (p->dev == skb->dev) &&
-                       !compare_ether_header(skb_mac_header(p),
+               unsigned long diffs;
+
+               diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev;
+               diffs |= compare_ether_header(skb_mac_header(p),
                                              skb_gro_mac_header(skb));
+               NAPI_GRO_CB(p)->same_flow = !diffs;
                NAPI_GRO_CB(p)->flush = 0;
        }