[NET]: Add netif_tx_lock
author     Herbert Xu <herbert@gondor.apana.org.au>
           Fri, 9 Jun 2006 19:20:56 +0000 (12:20 -0700)
committer  David S. Miller <davem@sunset.davemloft.net>
           Sun, 18 Jun 2006 04:30:14 +0000 (21:30 -0700)
Various drivers use xmit_lock internally to synchronise with their
transmission routines.  They do so without setting xmit_lock_owner.
This is fine as long as netpoll is not in use.

With netpoll it is possible for deadlocks to occur if xmit_lock_owner
isn't set.  This is because a printk that occurs while xmit_lock is
held and xmit_lock_owner is not set can cause netpoll to attempt to
take xmit_lock recursively.
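
For illustration, here is what recording the owner buys: the xmit
path can recognise a recursive entry on the same CPU instead of
spinning on a lock it already holds.  This is only a sketch with a
hypothetical helper, modelled on the dev_queue_xmit logic rather
than copied from it:

static int xmit_owner_sketch(struct net_device *dev,
                             struct sk_buff *skb)
{
        int cpu = smp_processor_id();

        if (dev->xmit_lock_owner != cpu) {
                netif_tx_lock(dev);     /* records cpu as owner */
                dev->hard_start_xmit(skb, dev);
                netif_tx_unlock(dev);   /* resets owner to -1 */
                return 0;
        }

        /* Recursive entry, e.g. a printk issued under the lock that
         * reaches us again via netpoll.  If the owner were never
         * recorded, this branch could not trigger and we would spin
         * forever on our own lock.
         */
        return -1;
}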

While it is possible to resolve this by getting netpoll to use
trylock, it is suboptimal because netpoll's sole objective is to
maximise the chance of getting the printk out on the wire.  So
delaying or dropping the message is to be avoided as much as possible.

So the only alternative is to always set xmit_lock_owner.  The
following patch does this by introducing the netif_tx_lock family of
functions that take care of setting/unsetting xmit_lock_owner.
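
The substitution in driver code is mechanical.  Purely as an
illustration (hypothetical driver helper, not taken from this
patch):

-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        foo_reload_mc_filter(dev);      /* hypothetical hw update */
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);

The locking semantics are unchanged; the wrappers additionally set
xmit_lock_owner to smp_processor_id() on acquire and back to -1 on
release.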

I renamed xmit_lock to _xmit_lock to indicate that it should not be
used directly.  I didn't provide irq versions of the netif_tx_lock
functions since xmit_lock is meant to be a BH-disabling lock.

This is pretty much a straight text substitution except for a small
bug fix in winbond.  That driver currently uses
netif_stop_queue/spin_unlock_wait to stop transmission, which is
unsafe because an IRQ can wake the queue back up in between.  It is
safer to use netif_tx_disable, as sketched below.
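
A sketch of the two orderings in the winbond suspend path (the exact
context is in the winbond-840.c hunk below):

        /* unsafe: a tx-completion IRQ landing between these two
         * calls can netif_wake_queue() and restart transmission
         */
        netif_stop_queue(dev);
        spin_unlock_wait(&dev->xmit_lock);

        /* safe: with the IRQ quiesced nothing can wake the queue,
         * and netif_tx_disable() stops it under netif_tx_lock_bh(),
         * so any hard_start_xmit in flight has finished on return
         */
        synchronize_irq(dev->irq);
        netif_tx_disable(dev);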

The hamradio bits used spin_lock_irq, but that is unnecessary:
xmit_lock must never be taken in an IRQ handler, so the BH-disabling
netif_tx_lock_bh is sufficient.

Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
Signed-off-by: David S. Miller <davem@davemloft.net>
21 files changed:
Documentation/networking/netdevices.txt
drivers/infiniband/ulp/ipoib/ipoib_multicast.c
drivers/media/dvb/dvb-core/dvb_net.c
drivers/net/bnx2.c
drivers/net/bonding/bond_main.c
drivers/net/forcedeth.c
drivers/net/hamradio/6pack.c
drivers/net/hamradio/mkiss.c
drivers/net/ifb.c
drivers/net/irda/vlsi_ir.c
drivers/net/natsemi.c
drivers/net/tulip/winbond-840.c
drivers/net/wireless/orinoco.c
include/linux/netdevice.h
net/atm/clip.c
net/core/dev.c
net/core/dev_mcast.c
net/core/netpoll.c
net/core/pktgen.c
net/sched/sch_generic.c
net/sched/sch_teql.c

diff --git a/Documentation/networking/netdevices.txt b/Documentation/networking/netdevices.txt
index 3c0a5ba614d7c39146668f0b990e3fab03cceb06..847cedb238f6c0ed24ad6568b1d440bc3843fc6f 100644
--- a/Documentation/networking/netdevices.txt
+++ b/Documentation/networking/netdevices.txt
@@ -42,9 +42,9 @@ dev->get_stats:
        Context: nominally process, but don't sleep inside an rwlock
 
 dev->hard_start_xmit:
-       Synchronization: dev->xmit_lock spinlock.
+       Synchronization: netif_tx_lock spinlock.
        When the driver sets NETIF_F_LLTX in dev->features this will be
-       called without holding xmit_lock. In this case the driver 
+       called without holding netif_tx_lock. In this case the driver
        has to lock by itself when needed. It is recommended to use a try lock
        for this and return -1 when the spin lock fails. 
        The locking there should also properly protect against 
@@ -62,12 +62,12 @@ dev->hard_start_xmit:
          Only valid when NETIF_F_LLTX is set.
 
 dev->tx_timeout:
-       Synchronization: dev->xmit_lock spinlock.
+       Synchronization: netif_tx_lock spinlock.
        Context: BHs disabled
        Notes: netif_queue_stopped() is guaranteed true
 
 dev->set_multicast_list:
-       Synchronization: dev->xmit_lock spinlock.
+       Synchronization: netif_tx_lock spinlock.
        Context: BHs disabled
 
 dev->poll:
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 1dae4b238252d3b204d68570bc4fa73f9e73959c..1d917edcf9ba99ea3e2de4e33700ba4e05893c53 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -821,7 +821,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
 
        ipoib_mcast_stop_thread(dev, 0);
 
-       spin_lock_irqsave(&dev->xmit_lock, flags);
+       local_irq_save(flags);
+       netif_tx_lock(dev);
        spin_lock(&priv->lock);
 
        /*
@@ -896,7 +897,8 @@ void ipoib_mcast_restart_task(void *dev_ptr)
        }
 
        spin_unlock(&priv->lock);
-       spin_unlock_irqrestore(&dev->xmit_lock, flags);
+       netif_tx_unlock(dev);
+       local_irq_restore(flags);
 
        /* We have to cancel outside of the spinlock */
        list_for_each_entry_safe(mcast, tmcast, &remove_list, list) {
diff --git a/drivers/media/dvb/dvb-core/dvb_net.c b/drivers/media/dvb/dvb-core/dvb_net.c
index 2f0f35811bf7710c2a95e63bbeaa37c2900be0af..9fd87521a1639bd3dae51dcdce48545614d41a85 100644
--- a/drivers/media/dvb/dvb-core/dvb_net.c
+++ b/drivers/media/dvb/dvb-core/dvb_net.c
@@ -1052,7 +1052,7 @@ static void wq_set_multicast_list (void *data)
 
        dvb_net_feed_stop(dev);
        priv->rx_mode = RX_MODE_UNI;
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
 
        if (dev->flags & IFF_PROMISC) {
                dprintk("%s: promiscuous mode\n", dev->name);
@@ -1077,7 +1077,7 @@ static void wq_set_multicast_list (void *data)
                }
        }
 
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        dvb_net_feed_start(dev);
 }
 
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 54161aef3cac0cd763479a3a5bc9d7b00da1f83b..9c5a8842ed0f6377d877f68980b8027afbf98779 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -2009,7 +2009,7 @@ bnx2_poll(struct net_device *dev, int *budget)
        return 1;
 }
 
-/* Called with rtnl_lock from vlan functions and also dev->xmit_lock
+/* Called with rtnl_lock from vlan functions and also netif_tx_lock
  * from set_multicast.
  */
 static void
@@ -4252,7 +4252,7 @@ bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
 }
 #endif
 
-/* Called with dev->xmit_lock.
+/* Called with netif_tx_lock.
  * hard_start_xmit is pseudo-lockless - a lock is only required when
  * the tx queue is full. This way, we get the benefit of lockless
  * operations most of the time without the complexities to handle
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 55d236726d118382d0237db9d503305de3d7eba0..46326cdfb2776616bec130476ad60ee03620a527 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -4191,7 +4191,7 @@ static int bond_init(struct net_device *bond_dev, struct bond_params *params)
         */
        bond_dev->features |= NETIF_F_VLAN_CHALLENGED;
 
-       /* don't acquire bond device's xmit_lock when 
+       /* don't acquire bond device's netif_tx_lock when
         * transmitting */
        bond_dev->features |= NETIF_F_LLTX;
 
diff --git a/drivers/net/forcedeth.c b/drivers/net/forcedeth.c
index feb5b223cd60ed09e0dca642701d1d16f388fd49..5a8651b4b01dd297e7d42a4c5dc4f69bc67985c9 100644
--- a/drivers/net/forcedeth.c
+++ b/drivers/net/forcedeth.c
@@ -533,9 +533,9 @@ typedef union _ring_type {
  * critical parts:
  * - rx is (pseudo-) lockless: it relies on the single-threading provided
  *     by the arch code for interrupts.
- * - tx setup is lockless: it relies on dev->xmit_lock. Actual submission
+ * - tx setup is lockless: it relies on netif_tx_lock. Actual submission
  *     needs dev->priv->lock :-(
- * - set_multicast_list: preparation lockless, relies on dev->xmit_lock.
+ * - set_multicast_list: preparation lockless, relies on netif_tx_lock.
  */
 
 /* in dev: base, irq */
@@ -1213,7 +1213,7 @@ static void drain_ring(struct net_device *dev)
 
 /*
  * nv_start_xmit: dev->hard_start_xmit function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static int nv_start_xmit(struct sk_buff *skb, struct net_device *dev)
 {
@@ -1407,7 +1407,7 @@ static void nv_tx_done(struct net_device *dev)
 
 /*
  * nv_tx_timeout: dev->tx_timeout function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_tx_timeout(struct net_device *dev)
 {
@@ -1737,7 +1737,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
                 * Changing the MTU is a rare event, it shouldn't matter.
                 */
                nv_disable_irq(dev);
-               spin_lock_bh(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                spin_lock(&np->lock);
                /* stop engines */
                nv_stop_rx(dev);
@@ -1768,7 +1768,7 @@ static int nv_change_mtu(struct net_device *dev, int new_mtu)
                nv_start_rx(dev);
                nv_start_tx(dev);
                spin_unlock(&np->lock);
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
                nv_enable_irq(dev);
        }
        return 0;
@@ -1803,7 +1803,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
        memcpy(dev->dev_addr, macaddr->sa_data, ETH_ALEN);
 
        if (netif_running(dev)) {
-               spin_lock_bh(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                spin_lock_irq(&np->lock);
 
                /* stop rx engine */
@@ -1815,7 +1815,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
                /* restart rx engine */
                nv_start_rx(dev);
                spin_unlock_irq(&np->lock);
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
        } else {
                nv_copy_mac_to_hw(dev);
        }
@@ -1824,7 +1824,7 @@ static int nv_set_mac_address(struct net_device *dev, void *addr)
 
 /*
  * nv_set_multicast: dev->set_multicast function
- * Called with dev->xmit_lock held.
+ * Called with netif_tx_lock held.
  */
 static void nv_set_multicast(struct net_device *dev)
 {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index 102c1f0b90dac7a941902322dbc33cfd295c1799..d12605f0ac7c6e0f4b01ed0558d61a47c4037008 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -308,9 +308,9 @@ static int sp_set_mac_address(struct net_device *dev, void *addr)
 {
        struct sockaddr_ax25 *sa = addr;
 
-       spin_lock_irq(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-       spin_unlock_irq(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 
        return 0;
 }
@@ -767,9 +767,9 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
                        break;
                }
 
-               spin_lock_irq(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                memcpy(dev->dev_addr, &addr, AX25_ADDR_LEN);
-               spin_unlock_irq(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
 
                err = 0;
                break;
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index d81a8e1eeb8d5f807fce83f5a99ef04fefd755cf..3ebbbe56b6e94be8c9fd7ec3be2b7d3486f7c51d 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -357,9 +357,9 @@ static int ax_set_mac_address(struct net_device *dev, void *addr)
 {
        struct sockaddr_ax25 *sa = addr;
 
-       spin_lock_irq(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        memcpy(dev->dev_addr, &sa->sax25_call, AX25_ADDR_LEN);
-       spin_unlock_irq(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 
        return 0;
 }
@@ -886,9 +886,9 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
                        break;
                }
 
-               spin_lock_irq(&dev->xmit_lock);
+               netif_tx_lock_bh(dev);
                memcpy(dev->dev_addr, addr, AX25_ADDR_LEN);
-               spin_unlock_irq(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
 
                err = 0;
                break;
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 31fb2d75dc447a79bffa71f86d728425b68472a1..2e222ef91e224892c675766831fddc851e40f017 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -76,13 +76,13 @@ static void ri_tasklet(unsigned long dev)
        dp->st_task_enter++;
        if ((skb = skb_peek(&dp->tq)) == NULL) {
                dp->st_txq_refl_try++;
-               if (spin_trylock(&_dev->xmit_lock)) {
+               if (netif_tx_trylock(_dev)) {
                        dp->st_rxq_enter++;
                        while ((skb = skb_dequeue(&dp->rq)) != NULL) {
                                skb_queue_tail(&dp->tq, skb);
                                dp->st_rx2tx_tran++;
                        }
-                       spin_unlock(&_dev->xmit_lock);
+                       netif_tx_unlock(_dev);
                } else {
                        /* reschedule */
                        dp->st_rxq_notenter++;
@@ -110,7 +110,7 @@ static void ri_tasklet(unsigned long dev)
                }
        }
 
-       if (spin_trylock(&_dev->xmit_lock)) {
+       if (netif_tx_trylock(_dev)) {
                dp->st_rxq_check++;
                if ((skb = skb_peek(&dp->rq)) == NULL) {
                        dp->tasklet_pending = 0;
@@ -118,10 +118,10 @@ static void ri_tasklet(unsigned long dev)
                                netif_wake_queue(_dev);
                } else {
                        dp->st_rxq_rsch++;
-                       spin_unlock(&_dev->xmit_lock);
+                       netif_tx_unlock(_dev);
                        goto resched;
                }
-               spin_unlock(&_dev->xmit_lock);
+               netif_tx_unlock(_dev);
        } else {
 resched:
                dp->tasklet_pending = 1;
diff --git a/drivers/net/irda/vlsi_ir.c b/drivers/net/irda/vlsi_ir.c
index 97a49e0be76bc6197945814639b41e1d39f6caf1..d70b9e8d6e6054208216efb7ff9f88b1e50d7970 100644
--- a/drivers/net/irda/vlsi_ir.c
+++ b/drivers/net/irda/vlsi_ir.c
@@ -959,7 +959,7 @@ static int vlsi_hard_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                            ||  (now.tv_sec==ready.tv_sec && now.tv_usec>=ready.tv_usec))
                                break;
                        udelay(100);
-                       /* must not sleep here - we are called under xmit_lock! */
+                       /* must not sleep here - called under netif_tx_lock! */
                }
        }
 
diff --git a/drivers/net/natsemi.c b/drivers/net/natsemi.c
index 90627756d6fa47bbe93ce363af1d6ced2307fba4..2e4ecedba0572cc655b3827a39c88d0ddebb139b 100644
--- a/drivers/net/natsemi.c
+++ b/drivers/net/natsemi.c
@@ -318,12 +318,12 @@ performance critical codepaths:
 The rx process only runs in the interrupt handler. Access from outside
 the interrupt handler is only permitted after disable_irq().
 
-The rx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
+The rx process usually runs under the netif_tx_lock. If np->intr_tx_reap
 is set, then access is permitted under spin_lock_irq(&np->lock).
 
 Thus configuration functions that want to access everything must call
        disable_irq(dev->irq);
-       spin_lock_bh(dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        spin_lock_irq(&np->lock);
 
 IV. Notes
diff --git a/drivers/net/tulip/winbond-840.c b/drivers/net/tulip/winbond-840.c
index 136a70c4d5e4880c442cfd75bf5f6450a621cfb9..56d86c7c03862348d52a5ead0dd7c554168d649b 100644
--- a/drivers/net/tulip/winbond-840.c
+++ b/drivers/net/tulip/winbond-840.c
@@ -1605,11 +1605,11 @@ static void __devexit w840_remove1 (struct pci_dev *pdev)
  * - get_stats:
  *     spin_lock_irq(np->lock), doesn't touch hw if not present
  * - hard_start_xmit:
- *     netif_stop_queue + spin_unlock_wait(&dev->xmit_lock);
+ *     synchronize_irq + netif_tx_disable;
  * - tx_timeout:
- *     netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ *     netif_device_detach + netif_tx_disable;
  * - set_multicast_list
- *     netif_device_detach + spin_unlock_wait(&dev->xmit_lock);
+ *     netif_device_detach + netif_tx_disable;
  * - interrupt handler
  *     doesn't touch hw if not present, synchronize_irq waits for
  *     running instances of the interrupt handler.
@@ -1635,11 +1635,10 @@ static int w840_suspend (struct pci_dev *pdev, pm_message_t state)
                netif_device_detach(dev);
                update_csr6(dev, 0);
                iowrite32(0, ioaddr + IntrEnable);
-               netif_stop_queue(dev);
                spin_unlock_irq(&np->lock);
 
-               spin_unlock_wait(&dev->xmit_lock);
                synchronize_irq(dev->irq);
+               netif_tx_disable(dev);
        
                np->stats.rx_missed_errors += ioread32(ioaddr + RxMissed) & 0xffff;
 
diff --git a/drivers/net/wireless/orinoco.c b/drivers/net/wireless/orinoco.c
index c2d0b09e0418e1955c452248bb65686d8ed0d70e..a5fcfcde63d18c2ebea07136e7f4c074ecb543c6 100644
--- a/drivers/net/wireless/orinoco.c
+++ b/drivers/net/wireless/orinoco.c
@@ -1833,7 +1833,9 @@ static int __orinoco_program_rids(struct net_device *dev)
        /* Set promiscuity / multicast*/
        priv->promiscuous = 0;
        priv->mc_count = 0;
-       __orinoco_set_multicast_list(dev); /* FIXME: what about the xmit_lock */
+
+       /* FIXME: what about netif_tx_lock */
+       __orinoco_set_multicast_list(dev);
 
        return 0;
 }
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index b5760c67af9ce6aeeecfa940dec2c721bd91a3bb..067b9ccafd87ef1be07e8780e3d2397e7421b41d 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -407,7 +407,7 @@ struct net_device
  * One part is mostly used on xmit path (device)
  */
        /* hard_start_xmit synchronizer */
-       spinlock_t              xmit_lock ____cacheline_aligned_in_smp;
+       spinlock_t              _xmit_lock ____cacheline_aligned_in_smp;
        /* cpu id of processor entered to hard_start_xmit or -1,
           if nobody entered there.
         */
@@ -893,11 +893,43 @@ static inline void __netif_rx_complete(struct net_device *dev)
        clear_bit(__LINK_STATE_RX_SCHED, &dev->state);
 }
 
+static inline void netif_tx_lock(struct net_device *dev)
+{
+       spin_lock(&dev->_xmit_lock);
+       dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline void netif_tx_lock_bh(struct net_device *dev)
+{
+       spin_lock_bh(&dev->_xmit_lock);
+       dev->xmit_lock_owner = smp_processor_id();
+}
+
+static inline int netif_tx_trylock(struct net_device *dev)
+{
+       int err = spin_trylock(&dev->_xmit_lock);
+       if (!err)
+               dev->xmit_lock_owner = smp_processor_id();
+       return err;
+}
+
+static inline void netif_tx_unlock(struct net_device *dev)
+{
+       dev->xmit_lock_owner = -1;
+       spin_unlock(&dev->_xmit_lock);
+}
+
+static inline void netif_tx_unlock_bh(struct net_device *dev)
+{
+       dev->xmit_lock_owner = -1;
+       spin_unlock_bh(&dev->_xmit_lock);
+}
+
 static inline void netif_tx_disable(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        netif_stop_queue(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 /* These functions live elsewhere (drivers/net/net_init.c, but related) */
diff --git a/net/atm/clip.c b/net/atm/clip.c
index 72d8529826646e35018514046c3afac4197885a8..f92f9c94d2c762c51ffe76031c7598e85759a574 100644
--- a/net/atm/clip.c
+++ b/net/atm/clip.c
@@ -98,7 +98,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
                printk(KERN_CRIT "!clip_vcc->entry (clip_vcc %p)\n", clip_vcc);
                return;
        }
-       spin_lock_bh(&entry->neigh->dev->xmit_lock);    /* block clip_start_xmit() */
+       netif_tx_lock_bh(entry->neigh->dev);    /* block clip_start_xmit() */
        entry->neigh->used = jiffies;
        for (walk = &entry->vccs; *walk; walk = &(*walk)->next)
                if (*walk == clip_vcc) {
@@ -122,7 +122,7 @@ static void unlink_clip_vcc(struct clip_vcc *clip_vcc)
        printk(KERN_CRIT "ATMARP: unlink_clip_vcc failed (entry %p, vcc "
               "0x%p)\n", entry, clip_vcc);
       out:
-       spin_unlock_bh(&entry->neigh->dev->xmit_lock);
+       netif_tx_unlock_bh(entry->neigh->dev);
 }
 
 /* The neighbour entry n->lock is held. */
diff --git a/net/core/dev.c b/net/core/dev.c
index 6bfa78c66c2546722dc2a7d309715fbbd1dbe80f..1b09f1cae46ea069e6f709895f33a5a28eb79c4a 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1282,15 +1282,13 @@ int __skb_linearize(struct sk_buff *skb, gfp_t gfp_mask)
 
 #define HARD_TX_LOCK(dev, cpu) {                       \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               spin_lock(&dev->xmit_lock);             \
-               dev->xmit_lock_owner = cpu;             \
+               netif_tx_lock(dev);                     \
        }                                               \
 }
 
 #define HARD_TX_UNLOCK(dev) {                          \
        if ((dev->features & NETIF_F_LLTX) == 0) {      \
-               dev->xmit_lock_owner = -1;              \
-               spin_unlock(&dev->xmit_lock);           \
+               netif_tx_unlock(dev);                   \
        }                                               \
 }
 
@@ -1389,8 +1387,8 @@ int dev_queue_xmit(struct sk_buff *skb)
        /* The device has no queue. Common case for software devices:
           loopback, all the sorts of tunnels...
 
-          Really, it is unlikely that xmit_lock protection is necessary here.
-          (f.e. loopback and IP tunnels are clean ignoring statistics
+          Really, it is unlikely that netif_tx_lock protection is necessary
+          here.  (f.e. loopback and IP tunnels are clean ignoring statistics
           counters.)
           However, it is possible, that they rely on protection
           made by us here.
@@ -2805,7 +2803,7 @@ int register_netdevice(struct net_device *dev)
        BUG_ON(dev->reg_state != NETREG_UNINITIALIZED);
 
        spin_lock_init(&dev->queue_lock);
-       spin_lock_init(&dev->xmit_lock);
+       spin_lock_init(&dev->_xmit_lock);
        dev->xmit_lock_owner = -1;
 #ifdef CONFIG_NET_CLS_ACT
        spin_lock_init(&dev->ingress_lock);
diff --git a/net/core/dev_mcast.c b/net/core/dev_mcast.c
index 05d60850840e07a0d0beba51baf2b53fb622a883..c57d887da2ef20733e3d7f9c49e0d6b0aa405458 100644
--- a/net/core/dev_mcast.c
+++ b/net/core/dev_mcast.c
@@ -62,7 +62,7 @@
  *     Device mc lists are changed by bh at least if IPv6 is enabled,
  *     so that it must be bh protected.
  *
- *     We block accesses to device mc filters with dev->xmit_lock.
+ *     We block accesses to device mc filters with netif_tx_lock.
  */
 
 /*
@@ -93,9 +93,9 @@ static void __dev_mc_upload(struct net_device *dev)
 
 void dev_mc_upload(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        __dev_mc_upload(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 /*
@@ -107,7 +107,7 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
        int err = 0;
        struct dev_mc_list *dmi, **dmip;
 
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
 
        for (dmip = &dev->mc_list; (dmi = *dmip) != NULL; dmip = &dmi->next) {
                /*
@@ -139,13 +139,13 @@ int dev_mc_delete(struct net_device *dev, void *addr, int alen, int glbl)
                         */
                        __dev_mc_upload(dev);
                        
-                       spin_unlock_bh(&dev->xmit_lock);
+                       netif_tx_unlock_bh(dev);
                        return 0;
                }
        }
        err = -ENOENT;
 done:
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        return err;
 }
 
@@ -160,7 +160,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
        dmi1 = kmalloc(sizeof(*dmi), GFP_ATOMIC);
 
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        for (dmi = dev->mc_list; dmi != NULL; dmi = dmi->next) {
                if (memcmp(dmi->dmi_addr, addr, dmi->dmi_addrlen) == 0 &&
                    dmi->dmi_addrlen == alen) {
@@ -176,7 +176,7 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
        }
 
        if ((dmi = dmi1) == NULL) {
-               spin_unlock_bh(&dev->xmit_lock);
+               netif_tx_unlock_bh(dev);
                return -ENOMEM;
        }
        memcpy(dmi->dmi_addr, addr, alen);
@@ -189,11 +189,11 @@ int dev_mc_add(struct net_device *dev, void *addr, int alen, int glbl)
 
        __dev_mc_upload(dev);
        
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        return 0;
 
 done:
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        kfree(dmi1);
        return err;
 }
@@ -204,7 +204,7 @@ done:
 
 void dev_mc_discard(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        
        while (dev->mc_list != NULL) {
                struct dev_mc_list *tmp = dev->mc_list;
@@ -215,7 +215,7 @@ void dev_mc_discard(struct net_device *dev)
        }
        dev->mc_count = 0;
 
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 #ifdef CONFIG_PROC_FS
@@ -250,7 +250,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
        struct dev_mc_list *m;
        struct net_device *dev = v;
 
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        for (m = dev->mc_list; m; m = m->next) {
                int i;
 
@@ -262,7 +262,7 @@ static int dev_mc_seq_show(struct seq_file *seq, void *v)
 
                seq_putc(seq, '\n');
        }
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
        return 0;
 }
 
diff --git a/net/core/netpoll.c b/net/core/netpoll.c
index e8e05cebd95adaeee351c84144d6c51250b503e9..9cb781830380242cc16b9204cfdb103381dc4234 100644
--- a/net/core/netpoll.c
+++ b/net/core/netpoll.c
@@ -273,24 +273,21 @@ static void netpoll_send_skb(struct netpoll *np, struct sk_buff *skb)
 
        do {
                npinfo->tries--;
-               spin_lock(&np->dev->xmit_lock);
-               np->dev->xmit_lock_owner = smp_processor_id();
+               netif_tx_lock(np->dev);
 
                /*
                 * network drivers do not expect to be called if the queue is
                 * stopped.
                 */
                if (netif_queue_stopped(np->dev)) {
-                       np->dev->xmit_lock_owner = -1;
-                       spin_unlock(&np->dev->xmit_lock);
+                       netif_tx_unlock(np->dev);
                        netpoll_poll(np);
                        udelay(50);
                        continue;
                }
 
                status = np->dev->hard_start_xmit(skb, np->dev);
-               np->dev->xmit_lock_owner = -1;
-               spin_unlock(&np->dev->xmit_lock);
+               netif_tx_unlock(np->dev);
 
                /* success */
                if(!status) {
diff --git a/net/core/pktgen.c b/net/core/pktgen.c
index c23e9c06ee237203e792f421aabf6a3005996009..67ed14ddabd2b83a32bef68b002c67298bcc2f42 100644
--- a/net/core/pktgen.c
+++ b/net/core/pktgen.c
@@ -2897,7 +2897,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
                }
        }
 
-       spin_lock_bh(&odev->xmit_lock);
+       netif_tx_lock_bh(odev);
        if (!netif_queue_stopped(odev)) {
 
                atomic_inc(&(pkt_dev->skb->users));
@@ -2942,7 +2942,7 @@ static __inline__ void pktgen_xmit(struct pktgen_dev *pkt_dev)
                pkt_dev->next_tx_ns = 0;
        }
 
-       spin_unlock_bh(&odev->xmit_lock);
+       netif_tx_unlock_bh(odev);
 
        /* If pkt_dev->count is zero, then run forever */
        if ((pkt_dev->count != 0) && (pkt_dev->sofar >= pkt_dev->count)) {
diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c
index 138ea92ed268457b99d9c6b44a1a9580cc7c016a..b1e4c5e20ac709c971fd68c8a4b8c573d4c94468 100644
--- a/net/sched/sch_generic.c
+++ b/net/sched/sch_generic.c
@@ -72,9 +72,9 @@ void qdisc_unlock_tree(struct net_device *dev)
    dev->queue_lock serializes queue accesses for this device
    AND dev->qdisc pointer itself.
 
-   dev->xmit_lock serializes accesses to device driver.
+   netif_tx_lock serializes accesses to device driver.
 
-   dev->queue_lock and dev->xmit_lock are mutually exclusive,
+   dev->queue_lock and netif_tx_lock are mutually exclusive,
    if one is grabbed, another must be free.
  */
 
@@ -108,7 +108,7 @@ int qdisc_restart(struct net_device *dev)
                 * will be requeued.
                 */
                if (!nolock) {
-                       if (!spin_trylock(&dev->xmit_lock)) {
+                       if (!netif_tx_trylock(dev)) {
                        collision:
                                /* So, someone grabbed the driver. */
                                
@@ -126,8 +126,6 @@ int qdisc_restart(struct net_device *dev)
                                __get_cpu_var(netdev_rx_stat).cpu_collision++;
                                goto requeue;
                        }
-                       /* Remember that the driver is grabbed by us. */
-                       dev->xmit_lock_owner = smp_processor_id();
                }
                
                {
@@ -142,8 +140,7 @@ int qdisc_restart(struct net_device *dev)
                                ret = dev->hard_start_xmit(skb, dev);
                                if (ret == NETDEV_TX_OK) { 
                                        if (!nolock) {
-                                               dev->xmit_lock_owner = -1;
-                                               spin_unlock(&dev->xmit_lock);
+                                               netif_tx_unlock(dev);
                                        }
                                        spin_lock(&dev->queue_lock);
                                        return -1;
@@ -157,8 +154,7 @@ int qdisc_restart(struct net_device *dev)
                        /* NETDEV_TX_BUSY - we need to requeue */
                        /* Release the driver */
                        if (!nolock) { 
-                               dev->xmit_lock_owner = -1;
-                               spin_unlock(&dev->xmit_lock);
+                               netif_tx_unlock(dev);
                        } 
                        spin_lock(&dev->queue_lock);
                        q = dev->qdisc;
@@ -187,7 +183,7 @@ static void dev_watchdog(unsigned long arg)
 {
        struct net_device *dev = (struct net_device *)arg;
 
-       spin_lock(&dev->xmit_lock);
+       netif_tx_lock(dev);
        if (dev->qdisc != &noop_qdisc) {
                if (netif_device_present(dev) &&
                    netif_running(dev) &&
@@ -203,7 +199,7 @@ static void dev_watchdog(unsigned long arg)
                                dev_hold(dev);
                }
        }
-       spin_unlock(&dev->xmit_lock);
+       netif_tx_unlock(dev);
 
        dev_put(dev);
 }
@@ -227,17 +223,17 @@ void __netdev_watchdog_up(struct net_device *dev)
 
 static void dev_watchdog_up(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        __netdev_watchdog_up(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 static void dev_watchdog_down(struct net_device *dev)
 {
-       spin_lock_bh(&dev->xmit_lock);
+       netif_tx_lock_bh(dev);
        if (del_timer(&dev->watchdog_timer))
                dev_put(dev);
-       spin_unlock_bh(&dev->xmit_lock);
+       netif_tx_unlock_bh(dev);
 }
 
 void netif_carrier_on(struct net_device *dev)
@@ -582,7 +578,7 @@ void dev_deactivate(struct net_device *dev)
        while (test_bit(__LINK_STATE_SCHED, &dev->state))
                yield();
 
-       spin_unlock_wait(&dev->xmit_lock);
+       spin_unlock_wait(&dev->_xmit_lock);
 }
 
 void dev_init_scheduler(struct net_device *dev)
diff --git a/net/sched/sch_teql.c b/net/sched/sch_teql.c
index 79b8ef34c6e4c9b14d5d877ba64f0a496701e316..4c16ad57a3e49141335a2812b4c7bc3e976e9b46 100644
--- a/net/sched/sch_teql.c
+++ b/net/sched/sch_teql.c
@@ -302,20 +302,17 @@ restart:
 
                switch (teql_resolve(skb, skb_res, slave)) {
                case 0:
-                       if (spin_trylock(&slave->xmit_lock)) {
-                               slave->xmit_lock_owner = smp_processor_id();
+                       if (netif_tx_trylock(slave)) {
                                if (!netif_queue_stopped(slave) &&
                                    slave->hard_start_xmit(skb, slave) == 0) {
-                                       slave->xmit_lock_owner = -1;
-                                       spin_unlock(&slave->xmit_lock);
+                                       netif_tx_unlock(slave);
                                        master->slaves = NEXT_SLAVE(q);
                                        netif_wake_queue(dev);
                                        master->stats.tx_packets++;
                                        master->stats.tx_bytes += len;
                                        return 0;
                                }
-                               slave->xmit_lock_owner = -1;
-                               spin_unlock(&slave->xmit_lock);
+                               netif_tx_unlock(slave);
                        }
                        if (netif_queue_stopped(dev))
                                busy = 1;