e1000e: Save irq into netdev structure
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index d13760dc27f8e09a5bb324cb70fa1c126f8c27cc..478c34a47d180377225441a44609a486f2d1fe12 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -548,26 +548,23 @@ map_skb:
                rx_desc = E1000_RX_DESC(*rx_ring, i);
                rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
 
+               if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+                       /*
+                        * Force memory writes to complete before letting h/w
+                        * know there are new descriptors to fetch.  (Only
+                        * applicable for weak-ordered memory model archs,
+                        * such as IA-64).
+                        */
+                       wmb();
+                       writel(i, adapter->hw.hw_addr + rx_ring->tail);
+               }
                i++;
                if (i == rx_ring->count)
                        i = 0;
                buffer_info = &rx_ring->buffer_info[i];
        }
 
-       if (rx_ring->next_to_use != i) {
-               rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
-
-               /*
-                * Force memory writes to complete before letting h/w
-                * know there are new descriptors to fetch.  (Only
-                * applicable for weak-ordered memory model archs,
-                * such as IA-64).
-                */
-               wmb();
-               writel(i, adapter->hw.hw_addr + rx_ring->tail);
-       }
+       rx_ring->next_to_use = i;
 }
 
 /**
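Note: the tail register is now bumped every E1000_RX_BUFFER_WRITE descriptors inside the refill loop, rather than once at the end of the allocation pass, so the hardware is handed fresh buffers sooner. Below is a minimal, self-contained sketch of that batched-tail pattern. It is an illustration only: wmb()/writel() are replaced by userspace stand-ins and BATCH stands in for E1000_RX_BUFFER_WRITE.

    /* Sketch only: stand-ins for the kernel's wmb()/writel(), not driver code. */
    #include <stdint.h>

    #define BATCH     16          /* stands in for E1000_RX_BUFFER_WRITE */
    #define RING_SIZE 256

    static volatile uint32_t fake_tail_reg;        /* stands in for the RDT register */
    static uint64_t ring[RING_SIZE];               /* stands in for the descriptor ring */

    static void stub_wmb(void) { __sync_synchronize(); }                  /* like wmb() */
    static void stub_writel(uint32_t v, volatile uint32_t *r) { *r = v; } /* like writel() */

    static void refill(unsigned int next_to_use, unsigned int count)
    {
            unsigned int i = next_to_use;

            while (count--) {
                    ring[i] = 0xdeadbeef;          /* "fill" one descriptor */

                    /*
                     * Every BATCH descriptors: make the descriptor stores
                     * visible before the device is told about them, the
                     * same wmb() + tail-write pairing as in the hunk above.
                     */
                    if (!(i & (BATCH - 1))) {
                            stub_wmb();
                            stub_writel(i, &fake_tail_reg);
                    }

                    if (++i == RING_SIZE)
                            i = 0;
            }
    }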
@@ -649,6 +646,17 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
 
                rx_desc->read.buffer_addr[0] = cpu_to_le64(buffer_info->dma);
 
+               if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+                       /*
+                        * Force memory writes to complete before letting h/w
+                        * know there are new descriptors to fetch.  (Only
+                        * applicable for weak-ordered memory model archs,
+                        * such as IA-64).
+                        */
+                       wmb();
+                       writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
+               }
+
                i++;
                if (i == rx_ring->count)
                        i = 0;
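Note on the i<<1 above: as the comment being deleted from the old tail-write path below explains, the hardware steps the tail in 16-byte units while packet-split descriptors are 32 bytes, so the software index is doubled before it is written. A small worked example of that conversion, with the sizes spelled out:

    /*
     * Sketch: converting a packet-split descriptor index into the value
     * written to the tail register.  Per the removed comment below, the
     * hardware counts tail in 16-byte steps and a packet-split descriptor
     * is 32 bytes, so each software index covers two hardware steps.
     */
    #include <assert.h>

    #define LEGACY_DESC_SIZE 16
    #define PS_DESC_SIZE     32

    static unsigned int ps_tail_value(unsigned int sw_index)
    {
            return sw_index * (PS_DESC_SIZE / LEGACY_DESC_SIZE);  /* == sw_index << 1 */
    }

    int main(void)
    {
            assert(ps_tail_value(5) == 10);   /* descriptor 5 -> tail value 10 */
            return 0;
    }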
@@ -656,26 +664,7 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
        }
 
 no_buffers:
-       if (rx_ring->next_to_use != i) {
-               rx_ring->next_to_use = i;
-
-               if (!(i--))
-                       i = (rx_ring->count - 1);
-
-               /*
-                * Force memory writes to complete before letting h/w
-                * know there are new descriptors to fetch.  (Only
-                * applicable for weak-ordered memory model archs,
-                * such as IA-64).
-                */
-               wmb();
-               /*
-                * Hardware increments by 16 bytes, but packet split
-                * descriptors are 32 bytes...so we increment tail
-                * twice as much.
-                */
-               writel(i<<1, adapter->hw.hw_addr + rx_ring->tail);
-       }
+       rx_ring->next_to_use = i;
 }
 
 /**
@@ -1001,14 +990,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
                        cleaned = (i == eop);
 
                        if (cleaned) {
-                               struct sk_buff *skb = buffer_info->skb;
-                               unsigned int segs, bytecount;
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * skb_headlen(skb)) +
-                                           skb->len;
-                               total_tx_packets += segs;
-                               total_tx_bytes += bytecount;
+                               total_tx_packets += buffer_info->segs;
+                               total_tx_bytes += buffer_info->bytecount;
                        }
 
                        e1000_put_txbuf(adapter, buffer_info);
@@ -4261,7 +4244,7 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
        struct e1000_buffer *buffer_info;
        unsigned int len = skb_headlen(skb);
        unsigned int offset = 0, size, count = 0, i;
-       unsigned int f;
+       unsigned int f, bytecount, segs;
 
        i = tx_ring->next_to_use;
 
@@ -4321,7 +4304,13 @@ static int e1000_tx_map(struct e1000_adapter *adapter,
                }
        }
 
+       segs = skb_shinfo(skb)->gso_segs ?: 1;
+       /* multiply data chunks by size of headers */
+       bytecount = ((segs - 1) * skb_headlen(skb)) + skb->len;
+
        tx_ring->buffer_info[i].skb = skb;
+       tx_ring->buffer_info[i].segs = segs;
+       tx_ring->buffer_info[i].bytecount = bytecount;
        tx_ring->buffer_info[first].next_to_watch = i;
 
        return count;
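The segs/bytecount pair is now computed once here, at map time, and cached in the buffer_info so the cleanup path above can simply accumulate it. The accounting itself is unchanged: a GSO skb that the hardware splits into N segments puts N copies of the headers on the wire, so the byte total is skb->len plus (N - 1) extra copies of the linear header. A standalone sketch of that arithmetic, using hypothetical stand-in names for the skb fields:

    /*
     * Sketch of the TX byte accounting above, outside the kernel.
     * gso_segs/headlen/len mirror skb_shinfo(skb)->gso_segs,
     * skb_headlen(skb) and skb->len; the names here are illustrative only.
     */
    #include <stdio.h>

    struct fake_skb {
            unsigned int gso_segs;   /* 0 for a non-GSO frame  */
            unsigned int headlen;    /* linear (header) bytes  */
            unsigned int len;        /* total frame bytes      */
    };

    static void account(const struct fake_skb *skb,
                        unsigned int *packets, unsigned int *bytes)
    {
            unsigned int segs = skb->gso_segs ? skb->gso_segs : 1;

            /* each extra segment carries another copy of the headers */
            *packets += segs;
            *bytes   += (segs - 1) * skb->headlen + skb->len;
    }

    int main(void)
    {
            /* e.g. a 3000-byte TSO frame split into 3 segments with 54-byte headers */
            struct fake_skb skb = { .gso_segs = 3, .headlen = 54, .len = 3000 };
            unsigned int packets = 0, bytes = 0;

            account(&skb, &packets, &bytes);
            printf("%u packets, %u bytes\n", packets, bytes);   /* prints: 3 packets, 3108 bytes */
            return 0;
    }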
@@ -5451,6 +5440,8 @@ static int __devinit e1000_probe(struct pci_dev *pdev,
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
+       netdev->irq = pdev->irq;
+
        pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
        hw = &adapter->hw;
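This last hunk is the change named in the subject line: the PCI device's interrupt number is mirrored into netdev->irq early in probe, so the irq is available through the net_device as well. A stripped-down sketch of the same pattern in a hypothetical driver's probe (fake_probe is illustrative, not e1000e code; error handling and the rest of probe are omitted):

    /*
     * Sketch of the probe-time pattern above, with made-up names.
     * The line of interest is netdev->irq = pdev->irq.
     */
    #include <linux/pci.h>
    #include <linux/netdevice.h>
    #include <linux/etherdevice.h>

    static int fake_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
    {
            struct net_device *netdev;

            netdev = alloc_etherdev(0);
            if (!netdev)
                    return -ENOMEM;

            SET_NETDEV_DEV(netdev, &pdev->dev);

            /* mirror the PCI irq into the net_device, as the hunk above does */
            netdev->irq = pdev->irq;

            pci_set_drvdata(pdev, netdev);
            return 0;
    }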