qlcnic: fix race in tx stop queue
author    Rajesh K Borundia <rajesh.borundia@qlogic.com>
          Thu, 17 Jun 2010 02:56:41 +0000 (02:56 +0000)
committer David S. Miller <davem@davemloft.net>
          Thu, 17 Jun 2010 15:57:56 +0000 (08:57 -0700)
There is a race between netif_stop_queue and the netif_queue_stopped check
in the Tx-completion path: completions can free descriptors after the queue
has been stopped but before the check runs, leaving the queue stopped even
though buffers are available. So, after stopping the queue, check once again
whether buffers are available and wake the queue if they are. With this
logic we can also get rid of the tx lock in process_cmd_ring.
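
The pattern used to close the race (applied in qlcnic_xmit_frame below and
mirrored in qlcnic_send_cmd_descs) is: stop the queue, issue a full memory
barrier so the stop is visible before the ring state is re-read, then check
availability once more; if the completion path freed descriptors in that
window, undo the stop. A condensed sketch of the producer side, using only
names that appear in this patch:

	if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
		netif_stop_queue(netdev);
		/* make the stop visible before re-reading the ring;
		 * pairs with the smp_mb() in qlcnic_process_cmd_ring()
		 */
		smp_mb();
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
			netif_start_queue(netdev);	/* lost the race: space was freed */
		else
			return NETDEV_TX_BUSY;		/* genuinely out of descriptors */
	}

A spurious wake here is harmless, since the next transmit attempt re-checks
availability and simply stops the queue again.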

Signed-off-by: Rajesh K Borundia <rajesh.borundia@qlogic.com>
Signed-off-by: Amit Kumar Salecha <amit.salecha@qlogic.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
drivers/net/qlcnic/qlcnic.h
drivers/net/qlcnic/qlcnic_hw.c
drivers/net/qlcnic/qlcnic_init.c
drivers/net/qlcnic/qlcnic_main.c

diff --git a/drivers/net/qlcnic/qlcnic.h b/drivers/net/qlcnic/qlcnic.h
index 9970cff598d1cf4c4b5f600addeeb079a63ac8f2..99ccdd8ac41966ecc4142b515a2975cf2e63775a 100644
--- a/drivers/net/qlcnic/qlcnic.h
+++ b/drivers/net/qlcnic/qlcnic.h
 #define TX_UDPV6_PKT   0x0c
 
 /* Tx defines */
-#define MAX_BUFFERS_PER_CMD    32
-#define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + 4)
+#define MAX_TSO_HEADER_DESC    2
+#define MGMT_CMD_DESC_RESV     4
+#define TX_STOP_THRESH         ((MAX_SKB_FRAGS >> 2) + MAX_TSO_HEADER_DESC \
+                                                       + MGMT_CMD_DESC_RESV)
 #define QLCNIC_MAX_TX_TIMEOUTS 2
 
 /*
@@ -369,7 +371,7 @@ struct qlcnic_recv_crb {
  */
 struct qlcnic_cmd_buffer {
        struct sk_buff *skb;
-       struct qlcnic_skb_frag frag_array[MAX_BUFFERS_PER_CMD + 1];
+       struct qlcnic_skb_frag frag_array[MAX_SKB_FRAGS + 1];
        u32 frag_count;
 };
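
For scale: assuming 4 KiB pages (where MAX_SKB_FRAGS is 65536 / 4096 + 2 = 18),
TX_STOP_THRESH evaluates to (18 >> 2) + 2 + 4 = 10 descriptors: headroom for a
worst-case skb at four fragments per command descriptor, plus the TSO header
descriptors and a reserve for management command descriptors.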
 
diff --git a/drivers/net/qlcnic/qlcnic_hw.c b/drivers/net/qlcnic/qlcnic_hw.c
index f776956d2d6cdf7b50b18309d4b09dcd3dd19237..d9becb96d403150731dbb8714b4ffca3190f7704 100644
--- a/drivers/net/qlcnic/qlcnic_hw.c
+++ b/drivers/net/qlcnic/qlcnic_hw.c
@@ -338,9 +338,15 @@ qlcnic_send_cmd_descs(struct qlcnic_adapter *adapter,
 
        if (nr_desc >= qlcnic_tx_avail(tx_ring)) {
                netif_tx_stop_queue(tx_ring->txq);
-               __netif_tx_unlock_bh(tx_ring->txq);
-               adapter->stats.xmit_off++;
-               return -EBUSY;
+               smp_mb();
+               if (qlcnic_tx_avail(tx_ring) > nr_desc) {
+                       if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+                               netif_tx_wake_queue(tx_ring->txq);
+               } else {
+                       adapter->stats.xmit_off++;
+                       __netif_tx_unlock_bh(tx_ring->txq);
+                       return -EBUSY;
+               }
        }
 
        do {
diff --git a/drivers/net/qlcnic/qlcnic_init.c b/drivers/net/qlcnic/qlcnic_init.c
index 2bd00d54dd3f7ccc6ffb8a358b886c9b85ef49a2..058ce61501c357e6d27a4d9deb402799ae5e0c09 100644
--- a/drivers/net/qlcnic/qlcnic_init.c
+++ b/drivers/net/qlcnic/qlcnic_init.c
@@ -181,7 +181,9 @@ skip_rds:
 
        tx_ring = adapter->tx_ring;
        vfree(tx_ring->cmd_buf_arr);
+       tx_ring->cmd_buf_arr = NULL;
        kfree(adapter->tx_ring);
+       adapter->tx_ring = NULL;
 }
 
 int qlcnic_alloc_sw_resources(struct qlcnic_adapter *adapter)
diff --git a/drivers/net/qlcnic/qlcnic_main.c b/drivers/net/qlcnic/qlcnic_main.c
index 06d2dfd646fe66d9f111686379241b8804682d24..655bccd7f8f48583e724eeda3c4883797e25bb4d 100644
--- a/drivers/net/qlcnic/qlcnic_main.c
+++ b/drivers/net/qlcnic/qlcnic_main.c
@@ -132,12 +132,6 @@ qlcnic_update_cmd_producer(struct qlcnic_adapter *adapter,
                struct qlcnic_host_tx_ring *tx_ring)
 {
        writel(tx_ring->producer, tx_ring->crb_cmd_producer);
-
-       if (qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH) {
-               netif_stop_queue(adapter->netdev);
-               smp_mb();
-               adapter->stats.xmit_off++;
-       }
 }
 
 static const u32 msi_tgt_status[8] = {
@@ -1137,7 +1131,7 @@ qlcnic_setup_netdev(struct qlcnic_adapter *adapter,
        adapter->max_mc_count = 38;
 
        netdev->netdev_ops         = &qlcnic_netdev_ops;
-       netdev->watchdog_timeo     = 2*HZ;
+       netdev->watchdog_timeo     = 5*HZ;
 
        qlcnic_change_mtu(netdev, netdev->mtu);
 
@@ -1709,10 +1703,15 @@ qlcnic_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        /* 4 fragments per cmd des */
        no_of_desc = (frag_count + 3) >> 2;
 
-       if (unlikely(no_of_desc + 2 > qlcnic_tx_avail(tx_ring))) {
+       if (unlikely(qlcnic_tx_avail(tx_ring) <= TX_STOP_THRESH)) {
                netif_stop_queue(netdev);
-               adapter->stats.xmit_off++;
-               return NETDEV_TX_BUSY;
+               smp_mb();
+               if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH)
+                       netif_start_queue(netdev);
+               else {
+                       adapter->stats.xmit_off++;
+                       return NETDEV_TX_BUSY;
+               }
        }
 
        producer = tx_ring->producer;
@@ -2018,14 +2017,12 @@ static int qlcnic_process_cmd_ring(struct qlcnic_adapter *adapter)
                smp_mb();
 
                if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
-                       __netif_tx_lock(tx_ring->txq, smp_processor_id());
                        if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
                                netif_wake_queue(netdev);
-                               adapter->tx_timeo_cnt = 0;
                                adapter->stats.xmit_on++;
                        }
-                       __netif_tx_unlock(tx_ring->txq);
                }
+               adapter->tx_timeo_cnt = 0;
        }
        /*
         * If everything is freed up to consumer then check if the ring is full
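
With the above, the wake-up in qlcnic_process_cmd_ring no longer needs
__netif_tx_lock: the smp_mb() after the consumer index is updated pairs with
the barrier in the transmit path, so either the producer's re-check sees the
freed descriptors, or the completion path sees the stopped queue and wakes it.
A condensed sketch of the resulting wake path, using the names from the hunk
above:

	smp_mb();	/* order the consumer update vs. the stopped-queue check */
	if (netif_queue_stopped(netdev) && netif_carrier_ok(netdev)) {
		if (qlcnic_tx_avail(tx_ring) > TX_STOP_THRESH) {
			netif_wake_queue(netdev);
			adapter->stats.xmit_on++;
		}
	}
	adapter->tx_timeo_cnt = 0;

At worst both paths act and the queue is woken redundantly, which the transmit
path tolerates by stopping it again on the next full check.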