ipv6: Refactor update of IPv6 flowi destination address for srcrt (RH) option
diff --git a/net/core/dev.c b/net/core/dev.c
index 1845b08c624e1fa4c9dd398f4b61c2963dbfc1f2..ffca5c1066fa50947c4e5b88a86a47cace0ec57b 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1577,7 +1577,9 @@ EXPORT_SYMBOL(__netif_schedule);
 
 void dev_kfree_skb_irq(struct sk_buff *skb)
 {
-       if (atomic_dec_and_test(&skb->users)) {
+       if (!skb->destructor)
+               dev_kfree_skb(skb);
+       else if (atomic_dec_and_test(&skb->users)) {
                struct softnet_data *sd;
                unsigned long flags;
 
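This hunk short-circuits dev_kfree_skb_irq() for skbs that have no destructor, freeing them immediately instead of deferring them to the per-CPU completion queue. For context, the function after this change reads roughly as follows; the deferred branch is reconstructed from net-next of this era, so treat its details as approximate:

	void dev_kfree_skb_irq(struct sk_buff *skb)
	{
		if (!skb->destructor)
			dev_kfree_skb(skb);
		else if (atomic_dec_and_test(&skb->users)) {
			struct softnet_data *sd;
			unsigned long flags;

			/* defer the free: chain the skb onto this CPU's
			 * completion queue and let net_tx_action() reap it
			 */
			local_irq_save(flags);
			sd = &__get_cpu_var(softnet_data);
			skb->next = sd->completion_queue;
			sd->completion_queue = skb;
			raise_softirq_irqoff(NET_TX_SOFTIRQ);
			local_irq_restore(flags);
		}
	}

The deferred path exists because an skb destructor may take locks that are not safe to acquire in hard-IRQ context; skbs without a destructor have no such constraint, which is what the new fast path relies on.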
@@ -2038,14 +2040,24 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                                 struct netdev_queue *txq)
 {
        spinlock_t *root_lock = qdisc_lock(q);
+       bool contended = qdisc_is_running(q);
        int rc;
 
+       /*
+        * Heuristic to force contended enqueues to serialize on a
+        * separate lock before trying to get qdisc main lock.
+        * This permits __QDISC_STATE_RUNNING owner to get the lock more often
+        * and dequeue packets faster.
+        */
+       if (unlikely(contended))
+               spin_lock(&q->busylock);
+
        spin_lock(root_lock);
        if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
                kfree_skb(skb);
                rc = NET_XMIT_DROP;
        } else if ((q->flags & TCQ_F_CAN_BYPASS) && !qdisc_qlen(q) &&
-                  !test_and_set_bit(__QDISC_STATE_RUNNING, &q->state)) {
+                  qdisc_run_begin(q)) {
                /*
                 * This is a work-conserving queue; there are no old skbs
                 * waiting to be sent out; and the qdisc is not running -
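The condition change in this hunk replaces the open-coded test_and_set_bit() on __QDISC_STATE_RUNNING with the qdisc_run_begin() accessor, paired with qdisc_run_end() in the next hunk. As a sketch of the semantics assumed here (the helpers live in include/net/sch_generic.h; the exact definitions in this tree may differ slightly):

	static inline bool qdisc_is_running(struct Qdisc *qdisc)
	{
		return test_bit(__QDISC_STATE_RUNNING, &qdisc->state);
	}

	static inline bool qdisc_run_begin(struct Qdisc *qdisc)
	{
		/* true if the caller now owns the dequeue loop */
		return !test_and_set_bit(__QDISC_STATE_RUNNING, &qdisc->state);
	}

	static inline void qdisc_run_end(struct Qdisc *qdisc)
	{
		clear_bit(__QDISC_STATE_RUNNING, &qdisc->state);
	}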
@@ -2054,19 +2066,30 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
                if (!(dev->priv_flags & IFF_XMIT_DST_RELEASE))
                        skb_dst_force(skb);
                __qdisc_update_bstats(q, skb->len);
-               if (sch_direct_xmit(skb, q, dev, txq, root_lock))
+               if (sch_direct_xmit(skb, q, dev, txq, root_lock)) {
+                       if (unlikely(contended)) {
+                               spin_unlock(&q->busylock);
+                               contended = false;
+                       }
                        __qdisc_run(q);
-               else
-                       clear_bit(__QDISC_STATE_RUNNING, &q->state);
+               } else
+                       qdisc_run_end(q);
 
                rc = NET_XMIT_SUCCESS;
        } else {
                skb_dst_force(skb);
                rc = qdisc_enqueue_root(skb, q);
-               qdisc_run(q);
+               if (qdisc_run_begin(q)) {
+                       if (unlikely(contended)) {
+                               spin_unlock(&q->busylock);
+                               contended = false;
+                       }
+                       __qdisc_run(q);
+               }
        }
        spin_unlock(root_lock);
-
+       if (unlikely(contended))
+               spin_unlock(&q->busylock);
        return rc;
 }
 
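Taken together, the two hunks above give __dev_xmit_skb() the following locking pattern (distilled from the diff itself; the drop and bypass paths are omitted for brevity):

	bool contended = qdisc_is_running(q);

	if (unlikely(contended))
		spin_lock(&q->busylock);	/* contended senders line up here first */
	spin_lock(root_lock);
	rc = qdisc_enqueue_root(skb, q);
	if (qdisc_run_begin(q)) {
		if (unlikely(contended)) {
			/* release busylock before the dequeue loop so the
			 * next sender can start serializing behind us
			 */
			spin_unlock(&q->busylock);
			contended = false;
		}
		__qdisc_run(q);
	}
	spin_unlock(root_lock);
	if (unlikely(contended))
		spin_unlock(&q->busylock);

The __QDISC_STATE_RUNNING owner never holds busylock across __qdisc_run(), while every other sender serializes on busylock before contending for root_lock, so the owner can reacquire root_lock quickly between dequeues.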
@@ -2822,8 +2845,7 @@ static int __netif_receive_skb(struct sk_buff *skb)
                        skb->dev = master;
        }
 
-       __get_cpu_var(softnet_data).processed++;
-
+       __this_cpu_inc(softnet_data.processed);
        skb_reset_network_header(skb);
        skb_reset_transport_header(skb);
        skb->mac_len = skb->network_header - skb->mac_header;
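
The last hunk converts the per-CPU counter update to __this_cpu_inc(), which folds "compute this CPU's address, then increment" into a single per-CPU operation; on x86 this can compile down to one segment-prefixed increment. A minimal illustration of the two forms, using the kernel's softnet_data (declared per-CPU in netdevice.h; alignment attributes omitted here):

	DECLARE_PER_CPU(struct softnet_data, softnet_data);

	__get_cpu_var(softnet_data).processed++;	/* old: take address, then read-modify-write */
	__this_cpu_inc(softnet_data.processed);		/* new: one per-cpu increment */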