Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/linville/wirel...
[net-next-2.6.git] / net / core / dev.c
index 600bb23c4c2e3743239dc571c459fe3a5e8b19d5..a90737fe24726222ecffba1f63682337c89bf1bb 100644 (file)
@@ -953,6 +953,52 @@ rollback:
        return err;
 }
 
+/**
+ *     dev_set_alias - change ifalias of a device
+ *     @dev: device
+ *     @alias: name up to IFALIASZ
+ *     @len: length of @alias, limited to IFALIASZ
+ *
+ *     Set the ifalias for a device.  Passing len == 0 clears the alias.
+ */
+int dev_set_alias(struct net_device *dev, const char *alias, size_t len)
+{
+       char *new_ifalias;
+
+       ASSERT_RTNL();
+
+       if (len >= IFALIASZ)
+               return -EINVAL;
+
+       if (!len) {
+               if (dev->ifalias) {
+                       kfree(dev->ifalias);
+                       dev->ifalias = NULL;
+               }
+               return 0;
+       }
+
+       /* use a temporary so a failed krealloc() cannot leak the old alias */
+       new_ifalias = krealloc(dev->ifalias, len+1, GFP_KERNEL);
+       if (!new_ifalias)
+               return -ENOMEM;
+       dev->ifalias = new_ifalias;
+
+       strlcpy(dev->ifalias, alias, len+1);
+       return len;
+}
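+
+/*
+ * Illustrative usage only, not part of this change (dev, buf and ret are
+ * placeholders): callers must hold RTNL, as the sysfs "ifalias" attribute
+ * and rtnetlink's IFLA_IFALIAS path do.  On success the stored length is
+ * returned, otherwise a negative errno:
+ *
+ *     rtnl_lock();
+ *     ret = dev_set_alias(dev, buf, strnlen(buf, IFALIASZ));
+ *     rtnl_unlock();
+ */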
+
 /**
  *     netdev_features_change - device changes features
  *     @dev: device to cause notification
@@ -1339,19 +1370,23 @@ static void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 }
 
 
-void __netif_schedule(struct Qdisc *q)
+static inline void __netif_reschedule(struct Qdisc *q)
 {
-       if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state)) {
-               struct softnet_data *sd;
-               unsigned long flags;
+       struct softnet_data *sd;
+       unsigned long flags;
 
-               local_irq_save(flags);
-               sd = &__get_cpu_var(softnet_data);
-               q->next_sched = sd->output_queue;
-               sd->output_queue = q;
-               raise_softirq_irqoff(NET_TX_SOFTIRQ);
-               local_irq_restore(flags);
-       }
+       local_irq_save(flags);
+       sd = &__get_cpu_var(softnet_data);
+       q->next_sched = sd->output_queue;
+       sd->output_queue = q;
+       raise_softirq_irqoff(NET_TX_SOFTIRQ);
+       local_irq_restore(flags);
+}
+
+void __netif_schedule(struct Qdisc *q)
+{
+       if (!test_and_set_bit(__QDISC_STATE_SCHED, &q->state))
+               __netif_reschedule(q);
 }
 EXPORT_SYMBOL(__netif_schedule);
 
@@ -1671,13 +1706,13 @@ static u16 simple_tx_hash(struct net_device *dev, struct sk_buff *skb)
        }
 
        switch (skb->protocol) {
-       case __constant_htons(ETH_P_IP):
+       case htons(ETH_P_IP):
                ip_proto = ip_hdr(skb)->protocol;
                addr1 = ip_hdr(skb)->saddr;
                addr2 = ip_hdr(skb)->daddr;
                ihl = ip_hdr(skb)->ihl;
                break;
-       case __constant_htons(ETH_P_IPV6):
+       case htons(ETH_P_IPV6):
                ip_proto = ipv6_hdr(skb)->nexthdr;
                addr1 = ipv6_hdr(skb)->saddr.s6_addr32[3];
                addr2 = ipv6_hdr(skb)->daddr.s6_addr32[3];
@@ -1800,9 +1835,13 @@ gso:
 
                spin_lock(root_lock);
 
-               rc = qdisc_enqueue_root(skb, q);
-               qdisc_run(q);
-
+               if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+                       kfree_skb(skb);
+                       rc = NET_XMIT_DROP;
+               } else {
+                       rc = qdisc_enqueue_root(skb, q);
+                       qdisc_run(q);
+               }
                spin_unlock(root_lock);
 
                goto out;
@@ -1974,15 +2013,22 @@ static void net_tx_action(struct softirq_action *h)
 
                        head = head->next_sched;
 
-                       smp_mb__before_clear_bit();
-                       clear_bit(__QDISC_STATE_SCHED, &q->state);
-
                        root_lock = qdisc_lock(q);
                        if (spin_trylock(root_lock)) {
+                               smp_mb__before_clear_bit();
+                               clear_bit(__QDISC_STATE_SCHED,
+                                         &q->state);
                                qdisc_run(q);
                                spin_unlock(root_lock);
                        } else {
-                               __netif_schedule(q);
+                               if (!test_bit(__QDISC_STATE_DEACTIVATED,
+                                             &q->state)) {
+                                       __netif_reschedule(q);
+                               } else {
+                                       smp_mb__before_clear_bit();
+                                       clear_bit(__QDISC_STATE_SCHED,
+                                                 &q->state);
+                               }
                        }
                }
        }
@@ -2084,7 +2130,8 @@ static int ing_filter(struct sk_buff *skb)
        q = rxq->qdisc;
        if (q != &noop_qdisc) {
                spin_lock(qdisc_lock(q));
-               result = qdisc_enqueue_root(skb, q);
+               if (likely(!test_bit(__QDISC_STATE_DEACTIVATED, &q->state)))
+                       result = qdisc_enqueue_root(skb, q);
                spin_unlock(qdisc_lock(q));
        }
 
@@ -4652,6 +4699,12 @@ int netdev_compute_features(unsigned long all, unsigned long one)
                one |= NETIF_F_GSO_SOFTWARE;
        one |= NETIF_F_GSO;
 
+       /*
+        * If even one device supports a GSO protocol with software fallback,
+        * enable it for all.
+        */
+       all |= one & NETIF_F_GSO_SOFTWARE;
+
        /* If even one device supports robust GSO, enable it for all. */
        if (one & NETIF_F_GSO_ROBUST)
                all |= NETIF_F_GSO_ROBUST;