[SK_BUFF]: Introduce skb_network_offset()
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index e628126c9c49178211032801e1da1a17a64af338..cfb791bb45e29218693191e7657f60800daf429e 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -36,7 +36,7 @@ static char ixgb_driver_string[] = "Intel(R) PRO/10GbE Network Driver";
 #else
 #define DRIVERNAPI "-NAPI"
 #endif
-#define DRV_VERSION            "1.0.117-k2"DRIVERNAPI
+#define DRV_VERSION            "1.0.126-k2"DRIVERNAPI
 char ixgb_driver_version[] = DRV_VERSION;
 static char ixgb_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
 
@@ -456,9 +456,7 @@ ixgb_probe(struct pci_dev *pdev,
                           NETIF_F_HW_VLAN_TX |
                           NETIF_F_HW_VLAN_RX |
                           NETIF_F_HW_VLAN_FILTER;
-#ifdef NETIF_F_TSO
        netdev->features |= NETIF_F_TSO;
-#endif
 #ifdef NETIF_F_LLTX
        netdev->features |= NETIF_F_LLTX;
 #endif
@@ -1176,7 +1174,6 @@ ixgb_watchdog(unsigned long data)
 static int
 ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 {
-#ifdef NETIF_F_TSO
        struct ixgb_context_desc *context_desc;
        unsigned int i;
        uint8_t ipcss, ipcso, tucss, tucso, hdr_len;
@@ -1198,7 +1195,7 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
                skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
                                                      skb->nh.iph->daddr,
                                                      0, IPPROTO_TCP, 0);
-               ipcss = skb->nh.raw - skb->data;
+               ipcss = skb_network_offset(skb);
                ipcso = (void *)&(skb->nh.iph->check) - (void *)skb->data;
                ipcse = skb->h.raw - skb->data - 1;
                tucss = skb->h.raw - skb->data;
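
[Note: skb_network_offset(), introduced by this series, is a thin inline
accessor; at this point in the tree it reduces to exactly the pointer
arithmetic the hunk above replaces:

	/* From include/linux/skbuff.h (pre-2.6.22 sk_buff layout) */
	static inline int skb_network_offset(const struct sk_buff *skb)
	{
		return skb->nh.raw - skb->data;
	}
]
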
@@ -1233,7 +1230,6 @@ ixgb_tso(struct ixgb_adapter *adapter, struct sk_buff *skb)
 
                return 1;
        }
-#endif
 
        return 0;
 }
@@ -1287,6 +1283,7 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
        struct ixgb_buffer *buffer_info;
        int len = skb->len;
        unsigned int offset = 0, size, count = 0, i;
+       unsigned int mss = skb_shinfo(skb)->gso_size;
 
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
@@ -1298,6 +1295,11 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
        while(len) {
                buffer_info = &tx_ring->buffer_info[i];
                size = min(len, IXGB_MAX_DATA_PER_TXD);
+               /* Workaround for premature desc write-backs
+                * in TSO mode.  Append 4-byte sentinel desc */
+               if (unlikely(mss && !nr_frags && size == len && size > 8))
+                       size -= 4;
+
                buffer_info->length = size;
                WARN_ON(buffer_info->dma != 0);
                buffer_info->dma =
@@ -1324,6 +1326,13 @@ ixgb_tx_map(struct ixgb_adapter *adapter, struct sk_buff *skb,
                while(len) {
                        buffer_info = &tx_ring->buffer_info[i];
                        size = min(len, IXGB_MAX_DATA_PER_TXD);
+
+                       /* Workaround for premature desc write-backs
+                        * in TSO mode.  Append 4-byte sentinel desc */
+                       if (unlikely(mss && (f == (nr_frags - 1))
+                                    && size == len && size > 8))
+                               size -= 4;
+
                        buffer_info->length = size;
                        buffer_info->dma =
                                pci_map_page(adapter->pdev,
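
[Note: both copies of the sentinel workaround, in the linear-data loop and
the frag loop above, apply the same rule. A standalone sketch of the size
computation (ixgb_tso_sentinel_trim is a hypothetical helper, not part of
this patch):

	/* Hypothetical helper: on the final buffer of a TSO frame, shave
	 * 4 bytes so a trailing 4-byte sentinel descriptor can absorb the
	 * controller's premature descriptor write-back. */
	static unsigned int ixgb_tso_sentinel_trim(unsigned int size,
						   unsigned int len,
						   unsigned int mss,
						   bool last_buffer)
	{
		if (mss && last_buffer && size == len && size > 8)
			size -= 4;
		return size;
	}
]
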
@@ -1398,11 +1407,43 @@ ixgb_tx_queue(struct ixgb_adapter *adapter, int count, int vlan_id,int tx_flags)
        IXGB_WRITE_REG(&adapter->hw, TDT, i);
 }
 
+static int __ixgb_maybe_stop_tx(struct net_device *netdev, int size)
+{
+       struct ixgb_adapter *adapter = netdev_priv(netdev);
+       struct ixgb_desc_ring *tx_ring = &adapter->tx_ring;
+
+       netif_stop_queue(netdev);
+       /* Herbert's original patch had:
+        *  smp_mb__after_netif_stop_queue();
+        * but since that doesn't exist yet, just open code it. */
+       smp_mb();
+
+       /* We need to check again in case another CPU has just
+        * made room available. */
+       if (likely(IXGB_DESC_UNUSED(tx_ring) < size))
+               return -EBUSY;
+
+       /* A reprieve! */
+       netif_start_queue(netdev);
+       ++adapter->restart_queue;
+       return 0;
+}
+
+static int ixgb_maybe_stop_tx(struct net_device *netdev,
+                              struct ixgb_desc_ring *tx_ring, int size)
+{
+       if (likely(IXGB_DESC_UNUSED(tx_ring) >= size))
+               return 0;
+       return __ixgb_maybe_stop_tx(netdev, size);
+}
+
+
 /* Tx Descriptors needed, worst case */
 #define TXD_USE_COUNT(S) (((S) >> IXGB_MAX_TXD_PWR) + \
                         (((S) & (IXGB_MAX_DATA_PER_TXD - 1)) ? 1 : 0))
-#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) + \
-       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1
+#define DESC_NEEDED TXD_USE_COUNT(IXGB_MAX_DATA_PER_TXD) /* skb->data */ + \
+       MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE) + 1 /* for context */ \
+       + 1 /* one more needed for sentinel TSO workaround */
 
 static int
 ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
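
[Note: the stop-then-recheck sequence in __ixgb_maybe_stop_tx() only closes
the wake-up race if the Tx cleanup path issues the mirror-image barrier
before restarting the queue. A sketch of the matching completion-side
check, assuming a cleanup routine shaped like ixgb_clean_tx_irq(); names
here are illustrative, not part of this hunk:

	/* After reclaiming descriptors: wake the queue only if the
	 * producer stopped it and enough slots are free again.  The
	 * smp_mb() pairs with the one in __ixgb_maybe_stop_tx(). */
	if (unlikely(netif_queue_stopped(netdev))) {
		smp_mb();
		if (netif_queue_stopped(netdev) &&
		    IXGB_DESC_UNUSED(tx_ring) >= DESC_NEEDED)
			netif_wake_queue(netdev);
	}
]
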
@@ -1430,7 +1471,8 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        spin_lock_irqsave(&adapter->tx_lock, flags);
 #endif
 
-       if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED)) {
+       if (unlikely(ixgb_maybe_stop_tx(netdev, &adapter->tx_ring,
+                     DESC_NEEDED))) {
                netif_stop_queue(netdev);
                spin_unlock_irqrestore(&adapter->tx_lock, flags);
                return NETDEV_TX_BUSY;
@@ -1468,8 +1510,7 @@ ixgb_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 
 #ifdef NETIF_F_LLTX
        /* Make sure there is space in the ring for the next send. */
-       if(unlikely(IXGB_DESC_UNUSED(&adapter->tx_ring) < DESC_NEEDED))
-               netif_stop_queue(netdev);
+       ixgb_maybe_stop_tx(netdev, &adapter->tx_ring, DESC_NEEDED);
 
        spin_unlock_irqrestore(&adapter->tx_lock, flags);
 
@@ -1564,7 +1605,7 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
        struct pci_dev *pdev = adapter->pdev;
 
        /* Prevent stats update while adapter is being reset */
-       if (pdev->error_state && pdev->error_state != pci_channel_io_normal)
+       if (pci_channel_offline(pdev))
                return;
 
        if((netdev->flags & IFF_PROMISC) || (netdev->flags & IFF_ALLMULTI) ||
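
[Note: pci_channel_offline() folds the open-coded error_state test into a
single helper; its definition in <linux/pci.h> is essentially:

	static inline int pci_channel_offline(struct pci_dev *pdev)
	{
		return (pdev->error_state != pci_channel_io_normal);
	}
]
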
@@ -2172,8 +2213,7 @@ ixgb_vlan_rx_kill_vid(struct net_device *netdev, uint16_t vid)
 
        ixgb_irq_disable(adapter);
 
-       if(adapter->vlgrp)
-               adapter->vlgrp->vlan_devices[vid] = NULL;
+       vlan_group_set_device(adapter->vlgrp, vid, NULL);
 
        ixgb_irq_enable(adapter);
 
@@ -2193,7 +2233,7 @@ ixgb_restore_vlan(struct ixgb_adapter *adapter)
        if(adapter->vlgrp) {
                uint16_t vid;
                for(vid = 0; vid < VLAN_GROUP_ARRAY_LEN; vid++) {
-                       if(!adapter->vlgrp->vlan_devices[vid])
+                       if(!vlan_group_get_device(adapter->vlgrp, vid))
                                continue;
                        ixgb_vlan_rx_add_vid(adapter->netdev, vid);
                }
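
[Note: both VLAN hunks move from indexing adapter->vlgrp->vlan_devices[]
directly to the accessor pair added alongside the two-level
vlan_devices_arrays layout. vlan_group_set_device() tolerates a NULL group,
which is why the explicit if(adapter->vlgrp) guard could be dropped above.
Roughly, a sketch of this era's <linux/if_vlan.h> helpers:

	static inline struct net_device *vlan_group_get_device(struct vlan_group *vg,
								int vlan_id)
	{
		struct net_device **array;
		array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
		return array ? array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] : NULL;
	}

	static inline void vlan_group_set_device(struct vlan_group *vg, int vlan_id,
						 struct net_device *dev)
	{
		struct net_device **array;
		if (!vg)
			return;
		array = vg->vlan_devices_arrays[vlan_id / VLAN_GROUP_ARRAY_PART_LEN];
		array[vlan_id % VLAN_GROUP_ARRAY_PART_LEN] = dev;
	}
]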