ixgbe: cleanup unclear references to reg_idx

diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a1996d9c219c90419c0b9a455c0b5b..f2e81a21186af4f581050d462e1328e29d021a39 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -589,29 +589,33 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 {
        u32 mask;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+               break;
+       default:
+               break;
        }
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-                                     struct ixgbe_tx_buffer
-                                     *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+                                     struct ixgbe_tx_buffer *tx_buffer_info)
 {
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
-                       dma_unmap_page(&adapter->pdev->dev,
+                       dma_unmap_page(tx_ring->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
-                       dma_unmap_single(&adapter->pdev->dev,
+                       dma_unmap_single(tx_ring->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
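
The hunk above is part of this patch's broader move toward self-contained ring-level helpers: the unmap path now takes only the ring, reaching the DMA device through tx_ring->dev instead of adapter->pdev->dev. A minimal sketch of the setup side this implies (the helper name is hypothetical, not from this patch):

	/* cache the DMA device in the ring once, at init time, so that
	 * per-buffer unmap calls need neither the adapter nor the pdev */
	static void example_ring_set_dev(struct ixgbe_ring *ring,
					 struct pci_dev *pdev)
	{
		ring->dev = &pdev->dev;
	}
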
@@ -643,8 +647,8 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
 #ifdef CONFIG_IXGBE_DCB
        if (adapter->dcb_cfg.pfc_mode_enable) {
                int tc;
-               int reg_idx = tx_ring->reg_idx;
                int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+               u8 reg_idx = tx_ring->reg_idx;
 
                switch (adapter->hw.mac.type) {
                case ixgbe_mac_82598EB:
@@ -673,6 +677,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
                        break;
                default:
                        tc = 0;
+                       break;
                }
                txoff <<= tc;
        }
@@ -688,7 +693,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
 
        /* Detect a transmit hang in hardware, this serializes the
         * check with the clearing of time_stamp and movement of eop */
-       adapter->detect_tx_hung = false;
+       clear_check_for_tx_hang(tx_ring);
        if (tx_ring->tx_buffer_info[eop].time_stamp &&
            time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
            ixgbe_tx_xon_state(adapter, tx_ring)) {
@@ -704,8 +709,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                      "  time_stamp           <%lx>\n"
                      "  jiffies              <%lx>\n",
                      tx_ring->queue_index,
-                     IXGBE_READ_REG(hw, tx_ring->head),
-                     IXGBE_READ_REG(hw, tx_ring->tail),
+                     IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+                     IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
                      tx_ring->next_to_use, eop,
                      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
@@ -734,11 +739,10 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct net_device *netdev = adapter->netdev;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
+       u16 i, eop, count = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -749,44 +753,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                for ( ; !cleaned; count++) {
-                       struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                       cleaned = (i == eop);
-                       skb = tx_buffer_info->skb;
-
-                       if (cleaned && skb) {
-                               unsigned int segs, bytecount;
-                               unsigned int hlen = skb_headlen(skb);
-
-                               /* gso_segs is currently only valid for tcp */
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
-                               /* adjust for FCoE Sequence Offload */
-                               if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                                   && (skb->protocol == htons(ETH_P_FCOE)) &&
-                                   skb_is_gso(skb)) {
-                                       hlen = skb_transport_offset(skb) +
-                                               sizeof(struct fc_frame_header) +
-                                               sizeof(struct fcoe_crc_eof);
-                                       segs = DIV_ROUND_UP(skb->len - hlen,
-                                               skb_shinfo(skb)->gso_size);
-                               }
-#endif /* IXGBE_FCOE */
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * hlen) + skb->len;
-                               total_packets += segs;
-                               total_bytes += bytecount;
-                       }
-
-                       ixgbe_unmap_and_free_tx_resource(adapter,
-                                                        tx_buffer_info);
 
                        tx_desc->wb.status = 0;
+                       cleaned = (i == eop);
 
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
+
+                       if (cleaned && tx_buffer_info->skb) {
+                               total_bytes += tx_buffer_info->bytecount;
+                               total_packets += tx_buffer_info->gso_segs;
+                       }
+
+                       ixgbe_unmap_and_free_tx_resource(tx_ring,
+                                                        tx_buffer_info);
                }
 
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
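
The rewritten loop above no longer derives byte and segment counts from the skb at clean time; it reads tx_buffer_info->bytecount and ->gso_segs, which the transmit path is expected to have filled in. A sketch of that producer side, assuming the same accounting the removed code performed (replicated headers counted once per extra segment); this is illustrative, not part of this hunk:

	static void example_fill_tx_stats(struct ixgbe_tx_buffer *tx_buffer_info,
					  struct sk_buff *skb)
	{
		/* gso_segs is currently only valid for tcp */
		unsigned int segs = skb_shinfo(skb)->gso_segs ?: 1;

		tx_buffer_info->gso_segs = segs;
		/* multiply data chunks by size of headers */
		tx_buffer_info->bytecount = skb->len +
					    (segs - 1) * skb_headlen(skb);
	}
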
@@ -794,102 +777,135 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        }
 
        tx_ring->next_to_clean = i;
+       tx_ring->total_bytes += total_bytes;
+       tx_ring->total_packets += total_packets;
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.packets += total_packets;
+       tx_ring->stats.bytes += total_bytes;
+       u64_stats_update_end(&tx_ring->syncp);
+
+       if (check_for_tx_hang(tx_ring) &&
+           ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+               /* schedule immediate reset if we believe we hung */
+               e_info(probe, "tx hang %d detected, resetting "
+                      "adapter\n", adapter->tx_timeout_count + 1);
+               ixgbe_tx_timeout(adapter->netdev);
+
+               /* the adapter is about to reset, no point in enabling stuff */
+               return true;
+       }
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
-       if (unlikely(count && netif_carrier_ok(netdev) &&
+       if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
                     (IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))) {
                /* Make sure that anybody stopping the queue after this
                 * sees the new next_to_clean.
                 */
                smp_mb();
-               if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
+               if (__netif_subqueue_stopped(tx_ring->netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
-                       netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++tx_ring->restart_queue;
-               }
-       }
-
-       if (adapter->detect_tx_hung) {
-               if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
-                       /* schedule immediate reset if we believe we hung */
-                       e_info(probe, "tx hang %d detected, resetting "
-                              "adapter\n", adapter->tx_timeout_count + 1);
-                       ixgbe_tx_timeout(adapter->netdev);
+                       netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
+                       ++tx_ring->tx_stats.restart_queue;
                }
        }
 
-       /* re-arm the interrupt */
-       if (count >= tx_ring->work_limit)
-               ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
-       tx_ring->total_bytes += total_bytes;
-       tx_ring->total_packets += total_packets;
-       u64_stats_update_begin(&tx_ring->syncp);
-       tx_ring->stats.packets += total_packets;
-       tx_ring->stats.bytes += total_bytes;
-       u64_stats_update_end(&tx_ring->syncp);
        return count < tx_ring->work_limit;
 }
 
 #ifdef CONFIG_IXGBE_DCA
 static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *rx_ring)
+                               struct ixgbe_ring *rx_ring,
+                               int cpu)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
-       int cpu = get_cpu();
-       int q = rx_ring->reg_idx;
-
-       if (rx_ring->cpu != cpu) {
-               rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
-                       rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
-                       rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                  IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
-               }
-               rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
-               rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
-               rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
-               rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
-                           IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
-               rx_ring->cpu = cpu;
+       u8 reg_idx = rx_ring->reg_idx;
+
+       rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+               rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               break;
+       case ixgbe_mac_82599EB:
+               rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+               rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                          IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+               break;
+       default:
+               break;
        }
-       put_cpu();
+       rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+       rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+       rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+       rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+                   IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+       IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
 }
 
 static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *tx_ring)
+                               struct ixgbe_ring *tx_ring,
+                               int cpu)
 {
+       struct ixgbe_hw *hw = &adapter->hw;
        u32 txctrl;
+       u8 reg_idx = tx_ring->reg_idx;
+
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+               txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+               txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+               break;
+       case ixgbe_mac_82599EB:
+               txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+               txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+               txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+                          IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+               txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+               txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+               IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+               break;
+       default:
+               break;
+       }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+       struct ixgbe_adapter *adapter = q_vector->adapter;
        int cpu = get_cpu();
-       int q = tx_ring->reg_idx;
-       struct ixgbe_hw *hw = &adapter->hw;
+       long r_idx;
+       int i;
 
-       if (tx_ring->cpu != cpu) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
-                       txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
-                       txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
-                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
-                       txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
-                       txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
-                                 IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
-                       txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
-                       IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
-               }
-               tx_ring->cpu = cpu;
+       if (q_vector->cpu == cpu)
+               goto out_no_update;
+
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       for (i = 0; i < q_vector->txr_count; i++) {
+               ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
+               r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+                                     r_idx + 1);
+       }
+
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       for (i = 0; i < q_vector->rxr_count; i++) {
+               ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
+               r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+                                     r_idx + 1);
        }
+
+       q_vector->cpu = cpu;
+out_no_update:
        put_cpu();
 }
 
 static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
 {
+       int num_q_vectors;
        int i;
 
        if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
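
The per-ring counters added earlier in this hunk are published under the ring's u64_stats_sync sequence counter. A sketch of the matching reader, assuming the standard u64_stats_fetch_begin()/u64_stats_fetch_retry() pairing (the real consumer would be the driver's 64-bit stats path, which is not part of this diff):

	static u64 example_read_tx_packets(struct ixgbe_ring *ring)
	{
		unsigned int start;
		u64 packets;

		do {
			start = u64_stats_fetch_begin(&ring->syncp);
			packets = ring->stats.packets;
		} while (u64_stats_fetch_retry(&ring->syncp, start));

		return packets;
	}
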
@@ -898,22 +914,25 @@ static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
        /* always use CB2 mode, difference is masked in the CB driver */
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               adapter->tx_ring[i]->cpu = -1;
-               ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
-       }
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               adapter->rx_ring[i]->cpu = -1;
-               ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
+       if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+               num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+       else
+               num_q_vectors = 1;
+
+       for (i = 0; i < num_q_vectors; i++) {
+               adapter->q_vector[i]->cpu = -1;
+               ixgbe_update_dca(adapter->q_vector[i]);
        }
 }
 
 static int __ixgbe_notify_dca(struct device *dev, void *data)
 {
-       struct net_device *netdev = dev_get_drvdata(dev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
        unsigned long event = *(unsigned long *)data;
 
+       if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+               return 0;
+
        switch (event) {
        case DCA_PROVIDER_ADD:
                /* if we're already enabled, don't do it again */
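
ixgbe_setup_dca() above now walks q_vectors rather than rings, and ixgbe_update_dca() in turn walks each vector's ring bitmaps with the driver's find_first_bit()/find_next_bit() idiom. An equivalent walk using for_each_set_bit(), shown only for illustration (this assumes txr_count matches the number of set bits, as the driver maintains):

	static void example_walk_tx_rings(struct ixgbe_q_vector *q_vector,
					  struct ixgbe_adapter *adapter,
					  int cpu)
	{
		unsigned long r_idx;

		for_each_set_bit(r_idx, q_vector->txr_idx,
				 adapter->num_tx_queues)
			ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx],
					    cpu);
	}
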
@@ -1012,8 +1031,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-                                        struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 {
        /*
         * Force memory writes to complete before letting h/w
@@ -1022,72 +1040,81 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
         * such as IA-64).
         */
        wmb();
-       IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+       writel(val, rx_ring->tail);
 }
 
 /**
  * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
- * @adapter: address of board private structure
+ * @rx_ring: ring to place buffers on
+ * @cleaned_count: number of buffers to replace
  **/
-void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
-                           struct ixgbe_ring *rx_ring,
-                           int cleaned_count)
+void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count)
 {
-       struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
-       unsigned int i;
-       unsigned int bufsz = rx_ring->rx_buf_len;
+       struct sk_buff *skb;
+       u16 i = rx_ring->next_to_use;
 
-       i = rx_ring->next_to_use;
-       bi = &rx_ring->rx_buffer_info[i];
+       /* do nothing if no valid netdev defined */
+       if (!rx_ring->netdev)
+               return;
 
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+               bi = &rx_ring->rx_buffer_info[i];
+               skb = bi->skb;
 
-               if (!bi->page_dma &&
-                   (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
-                       if (!bi->page) {
-                               bi->page = netdev_alloc_page(netdev);
-                               if (!bi->page) {
-                                       adapter->alloc_rx_page_failed++;
-                                       goto no_buffers;
-                               }
-                               bi->page_offset = 0;
-                       } else {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= (PAGE_SIZE / 2);
-                       }
-
-                       bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-                                                   bi->page_offset,
-                                                   (PAGE_SIZE / 2),
-                                                   DMA_FROM_DEVICE);
-               }
-
-               if (!bi->skb) {
-                       struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
-                                                                       bufsz);
-                       bi->skb = skb;
-
+               if (!skb) {
+                       skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
+                                                       rx_ring->rx_buf_len);
                        if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
+                               rx_ring->rx_stats.alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
+                       bi->skb = skb;
                }
 
                if (!bi->dma) {
-                       bi->dma = dma_map_single(&pdev->dev,
-                                                bi->skb->data,
+                       bi->dma = dma_map_single(rx_ring->dev,
+                                                skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+                               rx_ring->rx_stats.alloc_rx_buff_failed++;
+                               bi->dma = 0;
+                               goto no_buffers;
+                       }
                }
-               /* Refresh the desc even if buffer_addrs didn't change because
-                * each write-back erases this info. */
-               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+
+               if (ring_is_ps_enabled(rx_ring)) {
+                       if (!bi->page) {
+                               bi->page = netdev_alloc_page(rx_ring->netdev);
+                               if (!bi->page) {
+                                       rx_ring->rx_stats.alloc_rx_page_failed++;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       if (!bi->page_dma) {
+                               /* use a half page if we're re-using */
+                               bi->page_offset ^= PAGE_SIZE / 2;
+                               bi->page_dma = dma_map_page(rx_ring->dev,
+                                                           bi->page,
+                                                           bi->page_offset,
+                                                           PAGE_SIZE / 2,
+                                                           DMA_FROM_DEVICE);
+                               if (dma_mapping_error(rx_ring->dev,
+                                                     bi->page_dma)) {
+                                       rx_ring->rx_stats.alloc_rx_page_failed++;
+                                       bi->page_dma = 0;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       /* Refresh the desc even if buffer_addrs didn't change
+                        * because each write-back erases this info. */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
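
The allocation path above now checks every DMA mapping with dma_mapping_error() and resets the cached handle on failure, per the DMA API contract. The idiom, condensed into a hypothetical helper with the same field semantics as the diff:

	static int example_map_rx_skb(struct device *dev,
				      struct ixgbe_rx_buffer *bi,
				      unsigned int buf_len)
	{
		bi->dma = dma_map_single(dev, bi->skb->data, buf_len,
					 DMA_FROM_DEVICE);
		if (dma_mapping_error(dev, bi->dma)) {
			/* keep 0 as the "not mapped" sentinel that the
			 * ring code tests against */
			bi->dma = 0;
			return -ENOMEM;
		}
		return 0;
	}
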
@@ -1098,56 +1125,48 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                i++;
                if (i == rx_ring->count)
                        i = 0;
-               bi = &rx_ring->rx_buffer_info[i];
        }
 
 no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
-
-               ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+               ixgbe_release_rx_desc(rx_ring, i);
        }
 }
 
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 {
-       return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-               IXGBE_RXDADV_RSCCNT_MASK) >>
-               IXGBE_RXDADV_RSCCNT_SHIFT;
+       /* HW will not DMA in data larger than the given buffer, even if it
+        * parses the (NFS, of course) header to be larger.  In that case, it
+        * fills the header buffer and spills the rest into the page.
+        */
+       u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+       u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+       if (hlen > IXGBE_RX_HDR_SIZE)
+               hlen = IXGBE_RX_HDR_SIZE;
+       return hlen;
 }
 
 /**
  * ixgbe_transform_rsc_queue - change rsc queue into a full packet
  * @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
  *
  * This function changes a queue full of hw rsc buffers into a completed
  * packet.  It uses the ->prev pointers to find the first packet and then
  * turns it into the frag list owner.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-                                                       u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
 {
        unsigned int frag_list_size = 0;
+       unsigned int skb_cnt = 1;
 
        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
-               *count += 1;
+               skb_cnt++;
        }
 
        skb_shinfo(skb)->frag_list = skb->next;
@@ -1155,68 +1174,59 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
+       IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
        return skb;
 }
 
-struct ixgbe_rsc_cb {
-       dma_addr_t dma;
-       bool delay_unmap;
-};
-
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+       return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+               IXGBE_RXDADV_RSCCNT_MASK);
+}
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
-       unsigned int i, rsc_count = 0;
-       u32 len, staterr;
-       u16 hdr_info;
-       bool cleaned = false;
-       int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       const int current_node = numa_node_id();
 #ifdef IXGBE_FCOE
        int ddp_bytes = 0;
 #endif /* IXGBE_FCOE */
+       u32 staterr;
+       u16 i;
+       u16 cleaned_count = 0;
+       bool pkt_is_rsc = false;
 
        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-       rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
-               if (*work_done >= work_to_do)
-                       break;
-               (*work_done)++;
 
                rmb(); /* read descriptor and rx_buffer_info after status DD */
-               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
-                       hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
-                       len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-                       if ((len > IXGBE_RX_HDR_SIZE) ||
-                           (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
-                               len = IXGBE_RX_HDR_SIZE;
-               } else {
-                       len = le16_to_cpu(rx_desc->wb.upper.length);
-               }
 
-               cleaned = true;
+               rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
                skb = rx_buffer_info->skb;
-               prefetch(skb->data);
                rx_buffer_info->skb = NULL;
+               prefetch(skb->data);
 
+               if (ring_is_rsc_enabled(rx_ring))
+                       pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+
+               /* if this is a skb from previous receive DMA will be 0 */
                if (rx_buffer_info->dma) {
-                       if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-                           (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-                                (!(skb->prev))) {
+                       u16 hlen;
+                       if (pkt_is_rsc &&
+                           !(staterr & IXGBE_RXD_STAT_EOP) &&
+                           !skb->prev) {
                                /*
                                 * When HWRSC is enabled, delay unmapping
                                 * of the first packet. It carries the
@@ -1227,29 +1237,42 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                IXGBE_RSC_CB(skb)->delay_unmap = true;
                                IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
                        } else {
-                               dma_unmap_single(&pdev->dev,
+                               dma_unmap_single(rx_ring->dev,
                                                 rx_buffer_info->dma,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
                        }
                        rx_buffer_info->dma = 0;
-                       skb_put(skb, len);
+
+                       if (ring_is_ps_enabled(rx_ring)) {
+                               hlen = ixgbe_get_hlen(rx_desc);
+                               upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+                       } else {
+                               hlen = le16_to_cpu(rx_desc->wb.upper.length);
+                       }
+
+                       skb_put(skb, hlen);
+               } else {
+                       /* assume packet split since header is unmapped */
+                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                }
 
                if (upper_len) {
-                       dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
-                                      PAGE_SIZE / 2, DMA_FROM_DEVICE);
+                       dma_unmap_page(rx_ring->dev,
+                                      rx_buffer_info->page_dma,
+                                      PAGE_SIZE / 2,
+                                      DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
                                           rx_buffer_info->page_offset,
                                           upper_len);
 
-                       if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
-                           (page_count(rx_buffer_info->page) != 1))
-                               rx_buffer_info->page = NULL;
-                       else
+                       if ((page_count(rx_buffer_info->page) == 1) &&
+                           (page_to_nid(rx_buffer_info->page) == current_node))
                                get_page(rx_buffer_info->page);
+                       else
+                               rx_buffer_info->page = NULL;
 
                        skb->len += upper_len;
                        skb->data_len += upper_len;
@@ -1264,10 +1287,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                prefetch(next_rxd);
                cleaned_count++;
 
-               if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
-                       rsc_count = ixgbe_get_rsc_count(rx_desc);
-
-               if (rsc_count) {
+               if (pkt_is_rsc) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1275,32 +1295,8 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }
 
-               if (staterr & IXGBE_RXD_STAT_EOP) {
-                       if (skb->prev)
-                               skb = ixgbe_transform_rsc_queue(skb,
-                                                               &(rx_ring->rsc_count));
-                       if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
-                               if (IXGBE_RSC_CB(skb)->delay_unmap) {
-                                       dma_unmap_single(&pdev->dev,
-                                                        IXGBE_RSC_CB(skb)->dma,
-                                                        rx_ring->rx_buf_len,
-                                                        DMA_FROM_DEVICE);
-                                       IXGBE_RSC_CB(skb)->dma = 0;
-                                       IXGBE_RSC_CB(skb)->delay_unmap = false;
-                               }
-                               if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-                                       rx_ring->rsc_count +=
-                                               skb_shinfo(skb)->nr_frags;
-                               else
-                                       rx_ring->rsc_count++;
-                               rx_ring->rsc_flush++;
-                       }
-                       u64_stats_update_begin(&rx_ring->syncp);
-                       rx_ring->stats.packets++;
-                       rx_ring->stats.bytes += skb->len;
-                       u64_stats_update_end(&rx_ring->syncp);
-               } else {
-                       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+               if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+                       if (ring_is_ps_enabled(rx_ring)) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
                                next_buffer->skb = skb;
@@ -1309,12 +1305,45 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
-                       rx_ring->non_eop_descs++;
+                       rx_ring->rx_stats.non_eop_descs++;
                        goto next_desc;
                }
 
+               if (skb->prev) {
+                       skb = ixgbe_transform_rsc_queue(skb);
+                       /* if we got here without RSC the packet is invalid */
+                       if (!pkt_is_rsc) {
+                               __pskb_trim(skb, 0);
+                               rx_buffer_info->skb = skb;
+                               goto next_desc;
+                       }
+               }
+
+               if (ring_is_rsc_enabled(rx_ring)) {
+                       if (IXGBE_RSC_CB(skb)->delay_unmap) {
+                               dma_unmap_single(rx_ring->dev,
+                                                IXGBE_RSC_CB(skb)->dma,
+                                                rx_ring->rx_buf_len,
+                                                DMA_FROM_DEVICE);
+                               IXGBE_RSC_CB(skb)->dma = 0;
+                               IXGBE_RSC_CB(skb)->delay_unmap = false;
+                       }
+               }
+               if (pkt_is_rsc) {
+                       if (ring_is_ps_enabled(rx_ring))
+                               rx_ring->rx_stats.rsc_count +=
+                                       skb_shinfo(skb)->nr_frags;
+                       else
+                               rx_ring->rx_stats.rsc_count +=
+                                       IXGBE_RSC_CB(skb)->skb_cnt;
+                       rx_ring->rx_stats.rsc_flush++;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
-                       dev_kfree_skb_irq(skb);
+                       /* trim packet back to size 0 and recycle it */
+                       __pskb_trim(skb, 0);
+                       rx_buffer_info->skb = skb;
                        goto next_desc;
                }
 
@@ -1324,7 +1353,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                total_rx_bytes += skb->len;
                total_rx_packets++;
 
-               skb->protocol = eth_type_trans(skb, adapter->netdev);
+               skb->protocol = eth_type_trans(skb, rx_ring->netdev);
 #ifdef IXGBE_FCOE
                /* if ddp, not passing to ULD unless for FCP_RSP or error */
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
@@ -1338,16 +1367,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 next_desc:
                rx_desc->wb.upper.status_error = 0;
 
+               (*work_done)++;
+               if (*work_done >= work_to_do)
+                       break;
+
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
-                       ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+                       ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
                        cleaned_count = 0;
                }
 
                /* use prefetched values */
                rx_desc = next_rxd;
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
 
@@ -1355,14 +1386,14 @@ next_desc:
        cleaned_count = IXGBE_DESC_UNUSED(rx_ring);
 
        if (cleaned_count)
-               ixgbe_alloc_rx_buffers(adapter, rx_ring, cleaned_count);
+               ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
 
 #ifdef IXGBE_FCOE
        /* include DDPed FCoE data */
        if (ddp_bytes > 0) {
                unsigned int mss;
 
-               mss = adapter->netdev->mtu - sizeof(struct fcoe_hdr) -
+               mss = rx_ring->netdev->mtu - sizeof(struct fcoe_hdr) -
                        sizeof(struct fc_frame_header) -
                        sizeof(struct fcoe_crc_eof);
                if (mss > 512)
@@ -1374,8 +1405,10 @@ next_desc:
 
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
-
-       return cleaned;
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
 }
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1389,7 +1422,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_q_vector *q_vector;
-       int i, j, q_vectors, v_idx, r_idx;
+       int i, q_vectors, v_idx, r_idx;
        u32 mask;
 
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1405,8 +1438,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                       adapter->num_rx_queues);
 
                for (i = 0; i < q_vector->rxr_count; i++) {
-                       j = adapter->rx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 0, j, v_idx);
+                       u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
+                       ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
@@ -1415,8 +1448,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                       adapter->num_tx_queues);
 
                for (i = 0; i < q_vector->txr_count; i++) {
-                       j = adapter->tx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 1, j, v_idx);
+                       u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
+                       ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
@@ -1447,11 +1480,18 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                }
        }
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
-       else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               break;
+       case ixgbe_mac_82599EB:
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
+               break;
+
+       default:
+               break;
+       }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 
        /* set up to autoclear timer, and the vectors */
@@ -1547,10 +1587,12 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
        int v_idx = q_vector->v_idx;
        u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
-       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
                /*
                 * 82599 can support a value of zero, so allow it for
                 * max interrupt rate, but there is an errata where it can
@@ -1565,6 +1607,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
                 * immediate assertion of the interrupt
                 */
                itr_reg |= IXGBE_EITR_CNT_WDIS;
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 }
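
A worked example for the conversion above, assuming the driver's usual EITR_INTS_PER_SEC_TO_REG() definition of 1000000000 / (rate * 256) (the interval field occupies bits 15:3, so the raw value is effectively in 2.048 us units):

	/*
	 *   rate = 8000 ints/s -> itr_reg = 1000000000 / (8000 * 256) = 488
	 *
	 * 82598: itr_reg |= (itr_reg << 16) writes both 16-bit halves,
	 *        which is what resets the hardware counter.
	 * 82599: itr_reg |= IXGBE_EITR_CNT_WDIS keeps the write from
	 *        clearing the timer bits, as the comment above explains.
	 */
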
@@ -1693,17 +1738,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
+       if (eicr & IXGBE_EICR_GPI_SDP2) {
+               /* Clear the interrupt */
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       schedule_work(&adapter->sfp_config_module_task);
+       }
+
        if (eicr & IXGBE_EICR_GPI_SDP1) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
-               schedule_work(&adapter->multispeed_fiber_task);
-       } else if (eicr & IXGBE_EICR_GPI_SDP2) {
-               /* Clear the interrupt */
-               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
-               schedule_work(&adapter->sfp_config_module_task);
-       } else {
-               /* Interrupt isn't for us... */
-               return;
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       schedule_work(&adapter->multispeed_fiber_task);
        }
 }
 
@@ -1743,16 +1789,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_msg_task(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               ixgbe_check_fan_failure(adapter, eicr);
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               ixgbe_check_sfp_event(adapter, eicr);
-               adapter->interrupt_event = eicr;
-               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-                       schedule_work(&adapter->check_overtemp_task);
-
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int i;
@@ -1762,12 +1800,24 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
                        for (i = 0; i < adapter->num_tx_queues; i++) {
                                struct ixgbe_ring *tx_ring =
                                                            adapter->tx_ring[i];
-                               if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
-                                                      &tx_ring->reinit_state))
+                               if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                                                      &tx_ring->state))
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
+               ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
        }
+
+       ixgbe_check_fan_failure(adapter, eicr);
+
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
@@ -1778,15 +1828,23 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+               break;
+       case ixgbe_mac_82599EB:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
@@ -1795,15 +1853,23 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+               break;
+       case ixgbe_mac_82599EB:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
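
A worked example for the 64-bit qmask split that ixgbe_irq_rearm_queues(), ixgbe_irq_enable_queues() and ixgbe_irq_disable_queues() all share on 82599, where queue bits span two 32-bit registers:

	/*
	 * qmask = 1ULL << 35 (queue 35):
	 *   lower = qmask & 0xFFFFFFFF = 0x00000000 -> _EX(0) write skipped
	 *   upper = qmask >> 32        = 0x00000008 -> bit 3 of _EX(1)
	 */
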
@@ -1846,8 +1912,13 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        int r_idx;
        int i;
 
+#ifdef CONFIG_IXGBE_DCA
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_dca(q_vector);
+#endif
+
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       for (i = 0;  i < q_vector->rxr_count; i++) {
+       for (i = 0; i < q_vector->rxr_count; i++) {
                rx_ring = adapter->rx_ring[r_idx];
                rx_ring->total_bytes = 0;
                rx_ring->total_packets = 0;
@@ -1858,7 +1929,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
        if (!q_vector->rxr_count)
                return IRQ_HANDLED;
 
-       /* disable interrupts on this vector only */
        /* EIAM disabled interrupts (on this vector) for us */
        napi_schedule(&q_vector->napi);
 
@@ -1917,13 +1987,14 @@ static int ixgbe_clean_rxonly(struct napi_struct *napi, int budget)
        int work_done = 0;
        long r_idx;
 
-       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
-       rx_ring = adapter->rx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_rx_dca(adapter, rx_ring);
+               ixgbe_update_dca(q_vector);
 #endif
 
+       r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+       rx_ring = adapter->rx_ring[r_idx];
+
        ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
 
        /* If all Rx work done, exit the polling mode */
@@ -1957,13 +2028,14 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
        long r_idx;
        bool tx_clean_complete = true;
 
+#ifdef CONFIG_IXGBE_DCA
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_dca(q_vector);
+#endif
+
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
                ring = adapter->tx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_tx_dca(adapter, ring);
-#endif
                tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
                r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
                                      r_idx + 1);
@@ -1976,10 +2048,6 @@ static int ixgbe_clean_rxtx_many(struct napi_struct *napi, int budget)
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
                ring = adapter->rx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
-               if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-                       ixgbe_update_rx_dca(adapter, ring);
-#endif
                ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
                r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
                                      r_idx + 1);
@@ -2018,13 +2086,14 @@ static int ixgbe_clean_txonly(struct napi_struct *napi, int budget)
        int work_done = 0;
        long r_idx;
 
-       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
-       tx_ring = adapter->tx_ring[r_idx];
 #ifdef CONFIG_IXGBE_DCA
        if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
-               ixgbe_update_tx_dca(adapter, tx_ring);
+               ixgbe_update_dca(q_vector);
 #endif
 
+       r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+       tx_ring = adapter->tx_ring[r_idx];
+
        if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
                work_done = budget;
 
@@ -2160,9 +2229,11 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                } else if (handler == &ixgbe_msix_clean_tx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "tx", ti++);
-               } else
+               } else {
                        sprintf(adapter->name[vector], "%s-%s-%d",
-                               netdev->name, "TxRx", vector);
+                               netdev->name, "TxRx", ri++);
+                       ti++;
+               }
 
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
@@ -2255,12 +2326,16 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                mask |= IXGBE_EIMS_GPI_SDP0;
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
                if (adapter->num_vfs)
                        mask |= IXGBE_EIMS_MAILBOX;
+               break;
+       default:
+               break;
        }
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2316,13 +2391,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
                ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
+       }
 
        ixgbe_check_fan_failure(adapter, eicr);
-       if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-           ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-               schedule_work(&adapter->check_overtemp_task);
 
        if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0]->total_packets = 0;
@@ -2415,14 +2497,19 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
                if (adapter->num_vfs > 32)
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2468,7 +2555,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
        txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2483,8 +2570,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_tx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
-       ring->head = IXGBE_TDH(reg_idx);
-       ring->tail = IXGBE_TDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
        /* configure fetching thresholds */
        if (adapter->rx_itr_setting == 0) {
@@ -2500,7 +2586,14 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        }
 
        /* reinitialize flowdirector state */
-       set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
+       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+           adapter->atr_sample_rate) {
+               ring->atr_sample_rate = adapter->atr_sample_rate;
+               ring->atr_count = 0;
+               set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+       } else {
+               ring->atr_sample_rate = 0;
+       }
 
        /* enable queue */
        txdctl |= IXGBE_TXDCTL_ENABLE;
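
With ring->head dropped and ring->tail now holding the mapped register address rather than a register offset (here and in ixgbe_configure_rx_ring() below), tail bumps no longer need a detour through the adapter structure. A minimal sketch of the resulting hot-path write, assuming only the ring fields visible in this patch (the helper name is illustrative):

	/* publish new descriptors, then ring the doorbell */
	static inline void ixgbe_bump_tail(struct ixgbe_ring *ring, u32 val)
	{
		ring->next_to_use = val;
		/* descriptor writes must be visible before the MMIO write */
		wmb();
		writel(val, ring->tail);
	}

The point is that writel() takes the stored tail address directly instead of recomputing hw->hw_addr + offset on every transmit.
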
@@ -2591,16 +2684,21 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring)
 {
        u32 srrctl;
-       int index;
-       struct ixgbe_ring_feature *feature = adapter->ring_feature;
+       u8 reg_idx = rx_ring->reg_idx;
 
-       index = rx_ring->reg_idx;
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               unsigned long mask;
-               mask = (unsigned long) feature[RING_F_RSS].mask;
-               index = index & mask;
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB: {
+               struct ixgbe_ring_feature *feature = adapter->ring_feature;
+               const int mask = feature[RING_F_RSS].mask;
+               reg_idx = reg_idx & mask;
        }
-       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+               break;
+       case ixgbe_mac_82599EB:
+       default:
+               break;
+       }
+
+       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
 
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2610,7 +2708,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
        srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
                  IXGBE_SRRCTL_BSIZEHDR_MASK;
 
-       if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+       if (ring_is_ps_enabled(rx_ring)) {
 #if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
                srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
 #else
@@ -2623,7 +2721,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
 
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }
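
On 82598 the SRRCTL registers are shared across an RSS group, hence the masking of reg_idx with the RSS feature mask; on 82599 every queue has its own SRRCTL, so the index is used as-is. For example, with a hypothetical RSS mask of 0xF, queue 17 would program SRRCTL(17 & 0xF) = SRRCTL(1).
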
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2703,9 +2801,9 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rscctrl;
        int rx_buf_len;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
-       if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+       if (!ring_is_rsc_enabled(ring))
                return;
 
        rx_buf_len = ring->rx_buf_len;
@@ -2716,7 +2814,7 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
         * total size of max desc * buf_len is not greater
         * than 65535
         */
-       if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+       if (ring_is_ps_enabled(ring)) {
 #if (MAX_SKB_FRAGS > 16)
                rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
 #elif (MAX_SKB_FRAGS > 8)
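
The hunk elides the non-packet-split branch, where the cap described in the comment above has to come from the buffer length instead of MAX_SKB_FRAGS. It presumably takes this shape (constants from ixgbe_type.h; treat as a sketch):

	} else {
		if (rx_buf_len < IXGBE_RXBUFFER_4096)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
		else if (rx_buf_len < IXGBE_RXBUFFER_8192)
			rscctrl |= IXGBE_RSCCTL_MAXDESC_8;
		else
			rscctrl |= IXGBE_RSCCTL_MAXDESC_4;
	}

Worked check: 16 descriptors of exactly 4096 bytes would total 65536, one past the 65535 limit, which is why 4 KB buffers must fall back to a cap of 8.
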
@@ -2769,9 +2867,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int reg_idx = ring->reg_idx;
        int wait_loop = IXGBE_MAX_RX_DESC_POLL;
        u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
 
        /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
        if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2795,7 +2893,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -2809,8 +2907,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
-       ring->head = IXGBE_RDH(reg_idx);
-       ring->tail = IXGBE_RDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
 
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
@@ -2832,7 +2929,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl);
 
        ixgbe_rx_desc_queue_enable(adapter, ring);
-       ixgbe_alloc_rx_buffers(adapter, ring, IXGBE_DESC_UNUSED(ring));
+       ixgbe_alloc_rx_buffers(ring, IXGBE_DESC_UNUSED(ring));
 }
 
 static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter)
@@ -2955,24 +3052,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                rx_ring->rx_buf_len = rx_buf_len;
 
                if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
-                       rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+                       set_ring_ps_enabled(rx_ring);
                else
-                       rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+                       clear_ring_ps_enabled(rx_ring);
+
+               if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+                       set_ring_rsc_enabled(rx_ring);
+               else
+                       clear_ring_rsc_enabled(rx_ring);
 
 #ifdef IXGBE_FCOE
                if (netdev->features & NETIF_F_FCOE_MTU) {
                        struct ixgbe_ring_feature *f;
                        f = &adapter->ring_feature[RING_F_FCOE];
                        if ((i >= f->mask) && (i < f->mask + f->indices)) {
-                               rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+                               clear_ring_ps_enabled(rx_ring);
                                if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
                                        rx_ring->rx_buf_len =
                                                IXGBE_FCOE_JUMBO_FRAME_SIZE;
+                       } else if (!ring_is_rsc_enabled(rx_ring) &&
+                                  !ring_is_ps_enabled(rx_ring)) {
+                               rx_ring->rx_buf_len =
+                                               IXGBE_FCOE_JUMBO_FRAME_SIZE;
                        }
                }
 #endif /* IXGBE_FCOE */
        }
-
 }
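
The ring_is_ps_enabled()/set_ring_ps_enabled() helpers used above replace open-coded ring->flags arithmetic with bit operations on the new ring->state word, the same word that holds __IXGBE_TX_FDIR_INIT_DONE. Their likely shape, assuming the definitions live in ixgbe.h alongside this patch:

	#define ring_is_ps_enabled(ring) \
		test_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
	#define set_ring_ps_enabled(ring) \
		set_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
	#define clear_ring_ps_enabled(ring) \
		clear_bit(__IXGBE_RX_PS_ENABLED, &(ring)->state)
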
 
 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -3348,8 +3453,6 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
-       u32 txdctl;
-       int i, j;
 
        if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) {
                if (hw->mac.type == ixgbe_mac_82598EB)
@@ -3365,25 +3468,18 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
 
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_TX_CONFIG);
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_RX_CONFIG);
 
-       /* reconfigure the hardware */
-       ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
-
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i]->reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-               /* PThresh workaround for Tx hang with DFP enabled. */
-               txdctl |= 32;
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j), txdctl);
-       }
        /* Enable VLAN tag insert/strip */
        adapter->netdev->features |= NETIF_F_HW_VLAN_RX;
 
        hw->mac.ops.set_vfta(&adapter->hw, 0, 0, true);
+
+       /* reconfigure the hardware */
+       ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg);
 }
 
 #endif
@@ -3567,6 +3663,14 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
 
+       if (ixgbe_is_sfp(hw)) {
+               ixgbe_sfp_link_config(adapter);
+       } else {
+               err = ixgbe_non_sfp_link_config(hw);
+               if (err)
+                       e_err(probe, "link_config FAILED %d\n", err);
+       }
+
        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_EICR);
        ixgbe_irq_enable(adapter, true, true);
@@ -3581,34 +3685,16 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                        e_crit(drv, "Fan has stopped, replace the adapter\n");
        }
 
-       /*
-        * For hot-pluggable SFP+ devices, a new SFP+ module may have
-        * arrived before interrupts were enabled but after probe.  Such
-        * devices wouldn't have their type identified yet. We need to
-        * kick off the SFP+ module setup first, then try to bring up link.
-        * If we're not hot-pluggable SFP+, we just need to configure link
-        * and bring it up.
-        */
-       if (hw->phy.type == ixgbe_phy_unknown) {
-               err = hw->phy.ops.identify(hw);
-               if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-                       /*
-                        * Take the device down and schedule the sfp tasklet
-                        * which will unregister_netdev and log it.
-                        */
-                       ixgbe_down(adapter);
-                       schedule_work(&adapter->sfp_config_module_task);
-                       return err;
-               }
-       }
-
-       if (ixgbe_is_sfp(hw)) {
-               ixgbe_sfp_link_config(adapter);
-       } else {
-               err = ixgbe_non_sfp_link_config(hw);
-               if (err)
-                       e_err(probe, "link_config FAILED %d\n", err);
-       }
+       /*
+        * For hot-pluggable SFP+ devices, a new SFP+ module may have
+        * arrived before interrupts were enabled but after probe.  Such
+        * devices wouldn't have their type identified yet. We need to
+        * kick off the SFP+ module setup first, then try to bring up link.
+        * If we're not hot-pluggable SFP+, we just need to configure link
+        * and bring it up.
+        */
+       if (hw->phy.type == ixgbe_phy_unknown)
+               schedule_work(&adapter->sfp_config_module_task);
 
        /* enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
@@ -3686,15 +3772,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = rx_ring->dev;
        unsigned long size;
-       unsigned int i;
+       u16 i;
 
        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_buffer_info)
@@ -3706,7 +3790,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
-                       dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+                       dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
@@ -3717,7 +3801,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                        do {
                                struct sk_buff *this = skb;
                                if (IXGBE_RSC_CB(this)->delay_unmap) {
-                                       dma_unmap_single(&pdev->dev,
+                                       dma_unmap_single(dev,
                                                         IXGBE_RSC_CB(this)->dma,
                                                         rx_ring->rx_buf_len,
                                                         DMA_FROM_DEVICE);
@@ -3731,7 +3815,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                if (!rx_buffer_info->page)
                        continue;
                if (rx_buffer_info->page_dma) {
-                       dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+                       dma_unmap_page(dev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                }
@@ -3748,24 +3832,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-
-       if (rx_ring->head)
-               writel(0, adapter->hw.hw_addr + rx_ring->head);
-       if (rx_ring->tail)
-               writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
  * ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned long size;
-       unsigned int i;
+       u16 i;
 
        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_buffer_info)
@@ -3774,7 +3851,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3785,11 +3862,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       if (tx_ring->head)
-               writel(0, adapter->hw.hw_addr + tx_ring->head);
-       if (tx_ring->tail)
-               writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -3801,7 +3873,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+               ixgbe_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -3813,7 +3885,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+               ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -3822,7 +3894,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
        u32 txdctl;
-       int i, j;
+       int i;
        int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        /* signal that we are down to the interrupt handler */
@@ -3880,16 +3952,21 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i]->reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+               u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }
        /* Disable the Tx DMA engine on 82599 */
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
+               break;
+       default:
+               break;
+       }
 
        /* power down the optics */
        if (hw->phy.multispeed_fiber)
@@ -3924,10 +4001,8 @@ static int ixgbe_poll(struct napi_struct *napi, int budget)
        int tx_clean_complete, work_done = 0;
 
 #ifdef CONFIG_IXGBE_DCA
-       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
-               ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
-               ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
-       }
+       if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+               ixgbe_update_dca(q_vector);
 #endif
 
        tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
@@ -4220,19 +4295,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 {
        int i;
-       bool ret = false;
 
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i]->reg_idx = i;
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i]->reg_idx = i;
-               ret = true;
-       } else {
-               ret = false;
-       }
+       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+               return false;
 
-       return ret;
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i]->reg_idx = i;
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               adapter->tx_ring[i]->reg_idx = i;
+
+       return true;
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4249,71 +4321,66 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
        bool ret = false;
        int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       /* the number of queues is assumed to be symmetric */
-                       for (i = 0; i < dcb_i; i++) {
-                               adapter->rx_ring[i]->reg_idx = i << 3;
-                               adapter->tx_ring[i]->reg_idx = i << 2;
-                       }
-                       ret = true;
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       if (dcb_i == 8) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 32
-                                * Tx TC2 starts at: descriptor queue 64
-                                * Tx TC3 starts at: descriptor queue 80
-                                * Tx TC4 starts at: descriptor queue 96
-                                * Tx TC5 starts at: descriptor queue 104
-                                * Tx TC6 starts at: descriptor queue 112
-                                * Tx TC7 starts at: descriptor queue 120
-                                *
-                                * Rx TC0-TC7 are offset by 16 queues each
-                                */
-                               for (i = 0; i < 3; i++) {
-                                       adapter->tx_ring[i]->reg_idx = i << 5;
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < 5; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 2) << 4);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < dcb_i; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 8) << 3);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return false;
 
-                               ret = true;
-                       } else if (dcb_i == 4) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 64
-                                * Tx TC2 starts at: descriptor queue 96
-                                * Tx TC3 starts at: descriptor queue 112
-                                *
-                                * Rx TC0-TC3 are offset by 32 queues each
-                                */
-                               adapter->tx_ring[0]->reg_idx = 0;
-                               adapter->tx_ring[1]->reg_idx = 64;
-                               adapter->tx_ring[2]->reg_idx = 96;
-                               adapter->tx_ring[3]->reg_idx = 112;
-                               for (i = 0 ; i < dcb_i; i++)
-                                       adapter->rx_ring[i]->reg_idx = i << 5;
-
-                               ret = true;
-                       } else {
-                               ret = false;
+       /* the number of queues is assumed to be symmetric */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               for (i = 0; i < dcb_i; i++) {
+                       adapter->rx_ring[i]->reg_idx = i << 3;
+                       adapter->tx_ring[i]->reg_idx = i << 2;
+               }
+               ret = true;
+               break;
+       case ixgbe_mac_82599EB:
+               if (dcb_i == 8) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 32
+                        * Tx TC2 starts at: descriptor queue 64
+                        * Tx TC3 starts at: descriptor queue 80
+                        * Tx TC4 starts at: descriptor queue 96
+                        * Tx TC5 starts at: descriptor queue 104
+                        * Tx TC6 starts at: descriptor queue 112
+                        * Tx TC7 starts at: descriptor queue 120
+                        *
+                        * Rx TC0-TC7 are offset by 16 queues each
+                        */
+                       for (i = 0; i < 3; i++) {
+                               adapter->tx_ring[i]->reg_idx = i << 5;
+                               adapter->rx_ring[i]->reg_idx = i << 4;
                        }
-               } else {
-                       ret = false;
+                       for ( ; i < 5; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       for ( ; i < dcb_i; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       ret = true;
+               } else if (dcb_i == 4) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 64
+                        * Tx TC2 starts at: descriptor queue 96
+                        * Tx TC3 starts at: descriptor queue 112
+                        *
+                        * Rx TC0-TC3 are offset by 32 queues each
+                        */
+                       adapter->tx_ring[0]->reg_idx = 0;
+                       adapter->tx_ring[1]->reg_idx = 64;
+                       adapter->tx_ring[2]->reg_idx = 96;
+                       adapter->tx_ring[3]->reg_idx = 112;
+                       for (i = 0 ; i < dcb_i; i++)
+                               adapter->rx_ring[i]->reg_idx = i << 5;
+                       ret = true;
                }
-       } else {
-               ret = false;
+               break;
+       default:
+               break;
        }
-
        return ret;
 }
 #endif
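
As a sanity check that the shift arithmetic reproduces the queue layout in the comment, this small standalone program (not driver code) mirrors the three loops for dcb_i == 8:

	#include <stdio.h>

	int main(void)
	{
		int i;

		for (i = 0; i < 3; i++)
			printf("Tx TC%d starts at %d\n", i, i << 5);       /* 0, 32, 64 */
		for (; i < 5; i++)
			printf("Tx TC%d starts at %d\n", i, (i + 2) << 4); /* 80, 96 */
		for (; i < 8; i++)
			printf("Tx TC%d starts at %d\n", i, (i + 8) << 3); /* 104, 112, 120 */
		return 0;
	}

The output matches the documented descriptor queue starts: 0, 32, 64, 80, 96, 104, 112, 120.
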
@@ -4353,55 +4420,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
  */
 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 {
-       int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
-       bool ret = false;
        struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+       int i;
+       u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+               return false;
 
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-                       ixgbe_cache_ring_dcb(adapter);
-                       /* find out queues in TC for FCoE */
-                       fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
-                       fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
-                       /*
-                        * In 82599, the number of Tx queues for each traffic
-                        * class for both 8-TC and 4-TC modes are:
-                        * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
-                        * 8 TCs:  32  32  16  16   8   8   8   8
-                        * 4 TCs:  64  64  32  32
-                        * We have max 8 queues for FCoE, where 8 the is
-                        * FCoE redirection table size. If TC for FCoE is
-                        * less than or equal to TC3, we have enough queues
-                        * to add max of 8 queues for FCoE, so we start FCoE
-                        * tx descriptor from the next one, i.e., reg_idx + 1.
-                        * If TC for FCoE is above TC3, implying 8 TC mode,
-                        * and we need 8 for FCoE, we have to take all queues
-                        * in that traffic class for FCoE.
-                        */
-                       if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
-                               fcoe_tx_i--;
-               }
+               ixgbe_cache_ring_dcb(adapter);
+               /* find out queues in TC for FCoE */
+               fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+               fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
+               /*
+                * In 82599, the number of Tx queues for each traffic
+                * class for both 8-TC and 4-TC modes are:
+                * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+                * 8 TCs:  32  32  16  16   8   8   8   8
+                * 4 TCs:  64  64  32  32
+                * We have max 8 queues for FCoE, where 8 is the
+                * FCoE redirection table size. If TC for FCoE is
+                * less than or equal to TC3, we have enough queues
+                * to add max of 8 queues for FCoE, so we start FCoE
+                * Tx queue from the next one, i.e., reg_idx + 1.
+                * If TC for FCoE is above TC3, implying 8 TC mode,
+                * and we need 8 for FCoE, we have to take all queues
+                * in that traffic class for FCoE.
+                */
+               if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+                       fcoe_tx_i--;
+       }
 #endif /* CONFIG_IXGBE_DCB */
-               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-                       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-                           (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
-                               ixgbe_cache_ring_fdir(adapter);
-                       else
-                               ixgbe_cache_ring_rss(adapter);
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+               if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+                   (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                       ixgbe_cache_ring_fdir(adapter);
+               else
+                       ixgbe_cache_ring_rss(adapter);
 
-                       fcoe_rx_i = f->mask;
-                       fcoe_tx_i = f->mask;
-               }
-               for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-                       adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-                       adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
-               }
-               ret = true;
+               fcoe_rx_i = f->mask;
+               fcoe_tx_i = f->mask;
        }
-       return ret;
+       for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+               adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+               adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+       }
+       return true;
 }
 
 #endif /* IXGBE_FCOE */
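
Worked example of the fcoe_tx_i decrement: in 8-TC mode with FCoE on TC4 (hypothetical but valid), TC4 owns Tx queues 96..103 per the table above. Starting the eight FCoE queues at reg_idx + 1 = 97 would spill into TC5 (base 104), so when f->indices == IXGBE_FCRETA_SIZE and fcoe->tc > 3 the code backs up one queue and takes all of 96..103.
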
@@ -4471,6 +4538,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
        int i;
+       int rx_count;
        int orig_node = adapter->node;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -4489,6 +4557,8 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
                        goto err_tx_ring_allocation;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
+               ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
                ring->numa_node = adapter->node;
 
                adapter->tx_ring[i] = ring;
@@ -4497,6 +4567,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
        /* Restore the adapter's original node */
        adapter->node = orig_node;
 
+       rx_count = adapter->rx_ring_count;
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbe_ring *ring = adapter->rx_ring[i];
                if (orig_node == -1) {
@@ -4511,8 +4582,10 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
                        ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
                if (!ring)
                        goto err_rx_ring_allocation;
-               ring->count = adapter->rx_ring_count;
+               ring->count = rx_count;
                ring->queue_index = i;
+               ring->dev = &adapter->pdev->dev;
+               ring->netdev = adapter->netdev;
                ring->numa_node = adapter->node;
 
                adapter->rx_ring[i] = ring;
@@ -4750,6 +4823,11 @@ err_set_interrupt:
        return err;
 }
 
+static void ring_free_rcu(struct rcu_head *head)
+{
+       kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
 /**
  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  * @adapter: board private structure to clear interrupt scheme on
@@ -4766,7 +4844,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               kfree(adapter->rx_ring[i]);
+               struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+               /* ixgbe_get_stats64() might access this ring, so wait
+                * an RCU grace period before freeing it.
+                */
+               call_rcu(&ring->rcu, ring_free_rcu);
                adapter->rx_ring[i] = NULL;
        }
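
The matching reader is what the grace period protects. A sketch of the pattern ixgbe_get_stats64() would use, assuming it walks the ring array under the RCU read lock (the helper name and details are illustrative):

	static void ixgbe_sum_rx_stats(struct ixgbe_adapter *adapter,
				       struct rtnl_link_stats64 *stats)
	{
		int i;

		rcu_read_lock();
		for (i = 0; i < adapter->num_rx_queues; i++) {
			/* the ring pointer may be cleared concurrently */
			struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);

			if (ring) {
				stats->rx_packets += ring->stats.packets;
				stats->rx_bytes += ring->stats.bytes;
			}
		}
		rcu_read_unlock();
	}

call_rcu() guarantees the kfree() cannot run until every read-side critical section that might still see the old pointer has finished.
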
 
@@ -4843,6 +4926,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        int j;
        struct tc_configuration *tc;
 #endif
+       int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* PCI config space info */
 
@@ -4857,11 +4941,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->ring_feature[RING_F_RSS].indices = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-       if (hw->mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-       } else if (hw->mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4890,6 +4976,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
+               break;
+       default:
+               break;
        }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4919,8 +5008,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_DCB
        adapter->last_lfc_mode = hw->fc.current_mode;
 #endif
-       hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
-       hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
        hw->fc.disable_fc_autoneg = false;
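
The fixed FCRTH/FCRTL defaults give way to watermarks derived from the maximum frame size, so pause thresholds now track the MTU; ixgbe_change_mtu() below recomputes them the same way. For a standard 1500-byte MTU, max_frame = 1500 + ETH_HLEN (14) + ETH_FCS_LEN (4) = 1518 bytes, and the FC_HIGH_WATER()/FC_LOW_WATER() macros, defined elsewhere in this series, scale the thresholds from that.
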
@@ -4958,15 +5047,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = tx_ring->dev;
        int size;
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -4981,7 +5068,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+       tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;
@@ -4994,7 +5081,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 err:
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
-       e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+       dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
        return -ENOMEM;
 }
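
The ALIGN to 4096 keeps the coherent DMA area page-sized: advanced descriptors are 16 bytes, so a default 512-entry ring needs 512 * 16 = 8192 bytes (already a multiple of 4096), while, say, an 80-entry ring (1280 bytes) rounds up to one full page.
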
 
@@ -5013,7 +5100,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
        int i, err = 0;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+               err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5025,48 +5112,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = rx_ring->dev;
        int size;
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-       rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+       rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
        if (!rx_ring->rx_buffer_info)
                rx_ring->rx_buffer_info = vmalloc(size);
-       if (!rx_ring->rx_buffer_info) {
-               e_err(probe, "vmalloc allocation failed for the Rx "
-                     "descriptor ring\n");
-               goto alloc_failed;
-       }
+       if (!rx_ring->rx_buffer_info)
+               goto err;
        memset(rx_ring->rx_buffer_info, 0, size);
 
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-       rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+       rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
 
-       if (!rx_ring->desc) {
-               e_err(probe, "Memory allocation failed for the Rx "
-                     "descriptor ring\n");
-               vfree(rx_ring->rx_buffer_info);
-               goto alloc_failed;
-       }
+       if (!rx_ring->desc)
+               goto err;
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
        return 0;
-
-alloc_failed:
+err:
+       vfree(rx_ring->rx_buffer_info);
+       rx_ring->rx_buffer_info = NULL;
+       dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -5080,13 +5160,12 @@ alloc_failed:
  *
  * Return 0 on success, negative on failure
  **/
-
 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 {
        int i, err = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+               err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5098,23 +5177,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
-
-       ixgbe_clean_tx_ring(adapter, tx_ring);
+       ixgbe_clean_tx_ring(tx_ring);
 
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
 
-       dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
-                         tx_ring->dma);
+       /* if not set, then don't free */
+       if (!tx_ring->desc)
+               return;
+
+       dma_free_coherent(tx_ring->dev, tx_ring->size,
+                         tx_ring->desc, tx_ring->dma);
 
        tx_ring->desc = NULL;
 }
@@ -5131,28 +5210,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
 
        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i]->desc)
-                       ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+                       ixgbe_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
  * ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
-
-       ixgbe_clean_rx_ring(adapter, rx_ring);
+       ixgbe_clean_rx_ring(rx_ring);
 
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
 
-       dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-                         rx_ring->dma);
+       /* if not set, then don't free */
+       if (!rx_ring->desc)
+               return;
+
+       dma_free_coherent(rx_ring->dev, rx_ring->size,
+                         rx_ring->desc, rx_ring->dma);
 
        rx_ring->desc = NULL;
 }
@@ -5169,7 +5248,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 
        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i]->desc)
-                       ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+                       ixgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -5182,6 +5261,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* MTU < 68 is an error and causes problems on some kernels */
@@ -5192,6 +5272,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
+
        if (netif_running(netdev))
                ixgbe_reinit_locked(adapter);
 
@@ -5287,8 +5370,8 @@ static int ixgbe_close(struct net_device *netdev)
 #ifdef CONFIG_PM
 static int ixgbe_resume(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
        u32 err;
 
        pci_set_power_state(pdev, PCI_D0);
@@ -5319,7 +5402,7 @@ static int ixgbe_resume(struct pci_dev *pdev)
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
 
        if (netif_running(netdev)) {
-               err = ixgbe_open(adapter->netdev);
+               err = ixgbe_open(netdev);
                if (err)
                        return err;
        }
@@ -5332,8 +5415,8 @@ static int ixgbe_resume(struct pci_dev *pdev)
 
 static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
        u32 ctrl, fctrl;
        u32 wufc = adapter->wol;
@@ -5350,6 +5433,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                ixgbe_free_all_rx_resources(adapter);
        }
 
+       ixgbe_clear_interrupt_scheme(adapter);
+
 #ifdef CONFIG_PM
        retval = pci_save_state(pdev);
        if (retval)
@@ -5376,15 +5461,19 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
        }
 
-       if (wufc && hw->mac.type == ixgbe_mac_82599EB)
-               pci_wake_from_d3(pdev, true);
-       else
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                pci_wake_from_d3(pdev, false);
+               break;
+       case ixgbe_mac_82599EB:
+               pci_wake_from_d3(pdev, !!wufc);
+               break;
+       default:
+               break;
+       }
 
        *enable_wake = !!wufc;
 
-       ixgbe_clear_interrupt_scheme(adapter);
-
        ixgbe_release_hw_control(adapter);
 
        pci_disable_device(pdev);
@@ -5433,10 +5522,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-       u64 non_eop_descs = 0, restart_queue = 0;
-       struct ixgbe_hw_stats *hwstats = &adapter->stats;
+       u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+       u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+       u64 bytes = 0, packets = 0;
 
        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5449,21 +5540,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                        adapter->hw_rx_no_dma_resources +=
                                IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       rsc_count += adapter->rx_ring[i]->rsc_count;
-                       rsc_flush += adapter->rx_ring[i]->rsc_flush;
+                       rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+                       rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
                }
                adapter->rsc_total_count = rsc_count;
                adapter->rsc_total_flush = rsc_flush;
        }
 
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+               non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+               alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+               alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+               bytes += rx_ring->stats.bytes;
+               packets += rx_ring->stats.packets;
+       }
+       adapter->non_eop_descs = non_eop_descs;
+       adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+       adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+       netdev->stats.rx_bytes = bytes;
+       netdev->stats.rx_packets = packets;
+
+       bytes = 0;
+       packets = 0;
        /* gather some stats to the adapter struct that are per queue */
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               restart_queue += adapter->tx_ring[i]->restart_queue;
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               restart_queue += tx_ring->tx_stats.restart_queue;
+               tx_busy += tx_ring->tx_stats.tx_busy;
+               bytes += tx_ring->stats.bytes;
+               packets += tx_ring->stats.packets;
+       }
        adapter->restart_queue = restart_queue;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
-       adapter->non_eop_descs = non_eop_descs;
+       adapter->tx_busy = tx_busy;
+       netdev->stats.tx_bytes = bytes;
+       netdev->stats.tx_packets = packets;
 
        hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
@@ -5478,17 +5589,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       hwstats->pxonrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
-                       hwstats->pxoffrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
-                       hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-               } else {
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                        hwstats->pxoffrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+                       break;
+               case ixgbe_mac_82599EB:
+                       hwstats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       hwstats->pxoffrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+                       break;
+               default:
+                       break;
                }
                hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5498,18 +5613,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        hwstats->gprc -= missed_rx;
 
        /* 82598 hardware only has a 32 bit counter in the high register */
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               u64 tmp;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       case ixgbe_mac_82599EB:
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
-                                               /* 4 high bits of GORC */
-               hwstats->gorc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
-                                               /* 4 high bits of GOTC */
-               hwstats->gotc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
                hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
@@ -5522,12 +5640,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 #endif /* IXGBE_FCOE */
-       } else {
-               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       default:
+               break;
        }
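
Note a behavioral detail in the 82599 branch: the removed code folded the 4 high-order bits of GORCH/GOTCH into the 64-bit totals, while the new code reads those registers only to clear them. With the stats poll running frequently, the high bits are presumably assumed never to become nonzero between reads.
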
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hwstats->bprc += bprc;
@@ -5700,8 +5815,8 @@ static void ixgbe_fdir_reinit_task(struct work_struct *work)
 
        if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
                for (i = 0; i < adapter->num_tx_queues; i++)
-                       set_bit(__IXGBE_FDIR_INIT_DONE,
-                               &(adapter->tx_ring[i]->reinit_state));
+                       set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                               &(adapter->tx_ring[i]->state));
        } else {
                e_err(probe, "failed to finish FDIR re-initialization, "
                      "ignored adding FDIR ATR filters\n");
@@ -5763,17 +5878,26 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                if (!netif_carrier_ok(netdev)) {
                        bool flow_rx, flow_tx;
 
-                       if (hw->mac.type == ixgbe_mac_82599EB) {
-                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
-                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
-                       } else {
+                       switch (hw->mac.type) {
+                       case ixgbe_mac_82598EB: {
                                u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                                u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
                                flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
                                flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
                        }
+                               break;
+                       case ixgbe_mac_82599EB: {
+                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+                       }
+                               break;
+                       default:
+                               flow_tx = false;
+                               flow_rx = false;
+                               break;
+                       }
 
                        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -5787,7 +5911,10 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                        netif_carrier_on(netdev);
                } else {
                        /* Force detection of hung controller */
-                       adapter->detect_tx_hung = true;
+                       for (i = 0; i < adapter->num_tx_queues; i++) {
+                               tx_ring = adapter->tx_ring[i];
+                               set_check_for_tx_hang(tx_ring);
+                       }
                }
        } else {
                adapter->link_up = false;
@@ -5823,7 +5950,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, u8 *hdr_len)
+                    u32 tx_flags, u8 *hdr_len, __be16 protocol)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -5841,7 +5968,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                l4len = tcp_hdrlen(skb);
                *hdr_len += l4len;
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->tot_len = 0;
                        iph->check = 0;
@@ -5880,7 +6007,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
                                   IXGBE_ADVTXD_DTYP_CTXT);
 
-               if (skb->protocol == htons(ETH_P_IP))
+               if (protocol == htons(ETH_P_IP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +6033,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
        return false;
 }
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+                     __be16 protocol)
 {
        u32 rtn = 0;
-       __be16 protocol;
-
-       if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               protocol = ((const struct vlan_ethhdr *)skb->data)->
-                                       h_vlan_encapsulated_proto;
-       else
-               protocol = skb->protocol;
 
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +6064,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
        default:
                if (unlikely(net_ratelimit()))
                        e_warn(probe, "partial checksum but proto=%x!\n",
-                              skb->protocol);
+                              ntohs(protocol));
                break;
        }
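
The VLAN-header peek deleted above is not lost: the callers now resolve
the EtherType once per skb with vlan_get_protocol() and pass it down to
ixgbe_tso(), ixgbe_tx_csum() and ixgbe_atr().  A simplified model of what
that helper computes, reconstructed from the removed lines (the in-tree
helper lives in <linux/if_vlan.h>; this sketch ignores
hardware-accelerated tags):

	static __be16 tx_protocol(const struct sk_buff *skb)
	{
		/* for an in-band 802.1Q tag, look past the VLAN header */
		if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
			return ((const struct vlan_ethhdr *)skb->data)->
						h_vlan_encapsulated_proto;
		return skb->protocol;
	}
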
 
@@ -5952,7 +6073,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
-                         struct sk_buff *skb, u32 tx_flags)
+                         struct sk_buff *skb, u32 tx_flags,
+                         __be16 protocol)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -5981,7 +6103,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                                    IXGBE_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL)
-                       type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+                       type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
                /* use index zero for tx checksum offload */
@@ -6004,15 +6126,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
-                       unsigned int first)
+                       unsigned int first, const u8 hdr_len)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = tx_ring->dev;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
+       unsigned int bytecount = skb->len;
+       u16 gso_segs = 1;
 
        i = tx_ring->next_to_use;
 
@@ -6027,10 +6151,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                tx_buffer_info->length = size;
                tx_buffer_info->mapped_as_page = false;
-               tx_buffer_info->dma = dma_map_single(&pdev->dev,
+               tx_buffer_info->dma = dma_map_single(dev,
                                                     skb->data + offset,
                                                     size, DMA_TO_DEVICE);
-               if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+               if (dma_mapping_error(dev, tx_buffer_info->dma))
                        goto dma_error;
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
@@ -6063,12 +6187,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                        tx_buffer_info->length = size;
-                       tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+                       tx_buffer_info->dma = dma_map_page(dev,
                                                           frag->page,
                                                           offset, size,
                                                           DMA_TO_DEVICE);
                        tx_buffer_info->mapped_as_page = true;
-                       if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+                       if (dma_mapping_error(dev, tx_buffer_info->dma))
                                goto dma_error;
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
@@ -6082,6 +6206,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        break;
        }
 
+       if (tx_flags & IXGBE_TX_FLAGS_TSO)
+               gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+       /* adjust for FCoE Sequence Offload */
+       else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+               gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+                                       skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+       /* account for the headers replicated in front of each segment */
+       bytecount += (gso_segs - 1) * hdr_len;
+
+       tx_ring->tx_buffer_info[i].bytecount = bytecount;
+       tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
        tx_ring->tx_buffer_info[i].skb = skb;
        tx_ring->tx_buffer_info[first].next_to_watch = i;
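
The new bytecount/gso_segs bookkeeping lets the Tx cleanup path credit
byte counters with what actually goes on the wire, where TSO/FSO
replicates the headers in front of every segment.  A worked example with
made-up values:

	/* illustrative numbers only:
	 *   skb->len  = 65226   bytes handed to the driver
	 *   hdr_len   = 66      Ethernet + IP + TCP headers
	 *   gso_size  = 1448    payload bytes per segment
	 *
	 *   gso_segs  = DIV_ROUND_UP(65226 - 66, 1448)  = 45
	 *   bytecount = 65226 + (45 - 1) * 66           = 68130
	 *
	 * i.e. 45 frames, each with its own copy of the headers. */
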
 
@@ -6103,14 +6240,13 @@ dma_error:
                        i += tx_ring->count;
                i--;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        return 0;
 }
 
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
-                          struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6175,60 +6311,46 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
+       writel(i, tx_ring->tail);
 }
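
The doorbell write no longer recomputes the MMIO address from
hw.hw_addr on every packet; the ring caches an __iomem pointer to its
tail register at setup time.  Plausibly something like the following,
though the exact init site is not in this diff:

	/* assumed ring setup, e.g. in the ring-configure path: */
	tx_ring->tail = adapter->hw.hw_addr + IXGBE_TDT(reg_idx);
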
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-                     int queue, u32 tx_flags)
+                     u8 queue, u32 tx_flags, __be16 protocol)
 {
        struct ixgbe_atr_input atr_input;
-       struct tcphdr *th;
        struct iphdr *iph = ip_hdr(skb);
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       u16 vlan_id, src_port, dst_port, flex_bytes;
-       u32 src_ipv4_addr, dst_ipv4_addr;
-       u8 l4type = 0;
+       struct tcphdr *th;
+       u16 vlan_id;
 
-       /* Right now, we support IPv4 only */
-       if (skb->protocol != htons(ETH_P_IP))
-               return;
-       /* check if we're UDP or TCP */
-       if (iph->protocol == IPPROTO_TCP) {
-               th = tcp_hdr(skb);
-               src_port = th->source;
-               dst_port = th->dest;
-               l4type |= IXGBE_ATR_L4TYPE_TCP;
-               /* l4type IPv4 type is 0, no need to assign */
-       } else {
-               /* Unsupported L4 header, just bail here */
+       /* Right now, we support IPv4 w/ TCP only */
+       if (protocol != htons(ETH_P_IP) ||
+           iph->protocol != IPPROTO_TCP)
                return;
-       }
 
        memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
 
        vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
                   IXGBE_TX_FLAGS_VLAN_SHIFT;
-       src_ipv4_addr = iph->saddr;
-       dst_ipv4_addr = iph->daddr;
-       flex_bytes = eth->h_proto;
+
+       th = tcp_hdr(skb);
 
        ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-       ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
-       ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
-       ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
-       ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+       ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+       ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+       ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+       ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
        /* src and dst are inverted, think how the receiver sees them */
-       ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
-       ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+       ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+       ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
 
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
 }
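
The rewritten ixgbe_atr() drops the local copies of the header fields
and programs the signature filter directly, but the deliberate src/dst
inversion is unchanged: the filter must match the flow as the receiver
sees it.  With made-up addresses:

	/* transmitted segment:      10.0.0.1:36000 -> 10.0.0.2:80
	 * filter programmed for Rx: src 10.0.0.2:80, dst 10.0.0.1:36000
	 * so the return traffic of this flow lands on the paired queue
	 * (assuming Rx and Tx queues are bound to the same CPU). */
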
 
-static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
-                                struct ixgbe_ring *tx_ring, int size)
+static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
-       netif_stop_subqueue(netdev, tx_ring->queue_index);
+       netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
        /* Herbert's original patch had:
         *  smp_mb__after_netif_stop_queue();
         * but since that doesn't exist yet, just open code it. */
@@ -6240,27 +6362,29 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
                return -EBUSY;
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
-       netif_start_subqueue(netdev, tx_ring->queue_index);
-       ++tx_ring->restart_queue;
+       netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index);
+       ++tx_ring->tx_stats.restart_queue;
        return 0;
 }
 
-static int ixgbe_maybe_stop_tx(struct net_device *netdev,
-                             struct ixgbe_ring *tx_ring, int size)
+static int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, int size)
 {
        if (likely(IXGBE_DESC_UNUSED(tx_ring) >= size))
                return 0;
-       return __ixgbe_maybe_stop_tx(netdev, tx_ring, size);
+       return __ixgbe_maybe_stop_tx(tx_ring, size);
 }
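
The stop-then-recheck sequence above is the standard lockless guard
against racing with the Tx cleanup path: stop the queue, make the store
visible, then look again in case cleanup freed descriptors in the
window.  The wake side, for reference, is a sketch of the usual pattern
and is not taken from this hunk (TX_WAKE_THRESHOLD and the exact test
are assumptions):

	/* cleanup side, after releasing descriptors: */
	if (unlikely(__netif_subqueue_stopped(tx_ring->netdev,
					      tx_ring->queue_index) &&
		     IXGBE_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
		netif_wake_subqueue(tx_ring->netdev, tx_ring->queue_index);
		++tx_ring->tx_stats.restart_queue;
	}
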
 
 static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int txq = smp_processor_id();
-
 #ifdef IXGBE_FCOE
-       if ((skb->protocol == htons(ETH_P_FCOE)) ||
-           (skb->protocol == htons(ETH_P_FIP))) {
+       __be16 protocol;
+
+       protocol = vlan_get_protocol(skb);
+
+       if ((protocol == htons(ETH_P_FCOE)) ||
+           (protocol == htons(ETH_P_FIP))) {
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                        txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
                        txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6292,10 +6416,11 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
        return skb_tx_hash(dev, skb);
 }
 
-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
                          struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring)
 {
+       struct net_device *netdev = tx_ring->netdev;
        struct netdev_queue *txq;
        unsigned int first;
        unsigned int tx_flags = 0;
@@ -6303,6 +6428,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        int tso;
        int count = 0;
        unsigned int f;
+       __be16 protocol;
+
+       protocol = vlan_get_protocol(skb);
 
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6451,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        /* for FCoE with DCB, we force the priority to what
         * was specified by the switch */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-           (skb->protocol == htons(ETH_P_FCOE) ||
-            skb->protocol == htons(ETH_P_FIP))) {
+           (protocol == htons(ETH_P_FCOE) ||
+            protocol == htons(ETH_P_FIP))) {
 #ifdef CONFIG_IXGBE_DCB
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6462,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                }
 #endif
                /* flag for FCoE offloads */
-               if (skb->protocol == htons(ETH_P_FCOE))
+               if (protocol == htons(ETH_P_FCOE))
                        tx_flags |= IXGBE_TX_FLAGS_FCOE;
        }
 #endif
@@ -6350,8 +6478,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
-       if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
-               adapter->tx_busy++;
+       if (ixgbe_maybe_stop_tx(tx_ring, count)) {
+               tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
 
@@ -6368,9 +6496,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                        tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
        } else {
-               if (skb->protocol == htons(ETH_P_IP))
+               if (protocol == htons(ETH_P_IP))
                        tx_flags |= IXGBE_TX_FLAGS_IPV4;
-               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+                               protocol);
                if (tso < 0) {
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
@@ -6378,30 +6507,30 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 
                if (tso)
                        tx_flags |= IXGBE_TX_FLAGS_TSO;
-               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+                                      protocol) &&
                         (skb->ip_summed == CHECKSUM_PARTIAL))
                        tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
 
-       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
        if (count) {
                /* add the ATR filter if ATR is on */
                if (tx_ring->atr_sample_rate) {
                        ++tx_ring->atr_count;
                        if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
-                            test_bit(__IXGBE_FDIR_INIT_DONE,
-                                     &tx_ring->reinit_state)) {
+                            test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+                                     &tx_ring->state)) {
                                ixgbe_atr(adapter, skb, tx_ring->queue_index,
-                                         tx_flags);
+                                         tx_flags, protocol);
                                tx_ring->atr_count = 0;
                        }
                }
                txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
                txq->tx_bytes += skb->len;
                txq->tx_packets++;
-               ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
-                              hdr_len);
-               ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
+               ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
+               ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED);
 
        } else {
                dev_kfree_skb_any(skb);
@@ -6418,7 +6547,7 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, struct net_device *netd
        struct ixgbe_ring *tx_ring;
 
        tx_ring = adapter->tx_ring[skb->queue_mapping];
-       return ixgbe_xmit_frame_ring(skb, netdev, adapter, tx_ring);
+       return ixgbe_xmit_frame_ring(skb, adapter, tx_ring);
 }
 
 /**
@@ -6559,20 +6688,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
        /* accurate rx/tx bytes/packets stats */
        dev_txq_stats_fold(netdev, stats);
+       rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->rx_ring[i];
+               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
                u64 bytes, packets;
                unsigned int start;
 
-               do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       packets = ring->stats.packets;
-                       bytes   = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-               stats->rx_packets += packets;
-               stats->rx_bytes   += bytes;
+               if (ring) {
+                       do {
+                               start = u64_stats_fetch_begin_bh(&ring->syncp);
+                               packets = ring->stats.packets;
+                               bytes   = ring->stats.bytes;
+                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+                       stats->rx_packets += packets;
+                       stats->rx_bytes   += bytes;
+               }
        }
-
+       rcu_read_unlock();
        /* following stats updated by ixgbe_watchdog_task() */
        stats->multicast        = netdev->stats.multicast;
        stats->rx_errors        = netdev->stats.rx_errors;
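
Two mechanisms are layered in the stats reader above.  RCU pins the
ring structure itself (rings can now be replaced at runtime, hence the
ACCESS_ONCE() load, the NULL check, and the rcu_barrier() added to
module exit below), while the u64_stats seqcount lets a 32-bit reader
retry if it raced with a writer mid-update of a 64-bit counter.  The
writer side, sketched from the canonical pattern rather than from this
hunk:

	/* per-packet hot path (e.g. Rx clean), canonical form: */
	u64_stats_update_begin(&ring->syncp);
	ring->stats.packets++;
	ring->stats.bytes += skb->len;
	u64_stats_update_end(&ring->syncp);
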
@@ -6754,8 +6886,8 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
 
        SET_NETDEV_DEV(netdev, &pdev->dev);
 
-       pci_set_drvdata(pdev, netdev);
        adapter = netdev_priv(netdev);
+       pci_set_drvdata(pdev, adapter);
 
        adapter->netdev = netdev;
        adapter->pdev = pdev;
@@ -6953,6 +7085,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
 
        switch (pdev->device) {
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               /* All except this subdevice support WOL */
+               if (pdev->subsystem_device ==
+                   IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+                       adapter->wol = 0;
+                       break;
+               }
        case IXGBE_DEV_ID_82599_KX4:
                adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
                                IXGBE_WUFC_MC | IXGBE_WUFC_BC);
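
Note the control flow: when the subsystem ID does not match the KX4/KR
mezzanine, the COMBO_BACKPLANE case deliberately falls through into the
KX4 case, so every other combo-backplane part inherits the same WOL
capabilities.  In skeleton form (a reading of the hunk, with an explicit
fall-through marker added for clarity):

	case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
		if (pdev->subsystem_device ==
		    IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
			adapter->wol = 0;	/* the one part without WOL */
			break;
		}
		/* fall through */
	case IXGBE_DEV_ID_82599_KX4:
		adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
				IXGBE_WUFC_MC | IXGBE_WUFC_BC);
		break;
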
@@ -7078,8 +7217,8 @@ err_dma:
  **/
 static void __devexit ixgbe_remove(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
 
        set_bit(__IXGBE_DOWN, &adapter->state);
        /* clear the module not found bit to make sure the worker won't
@@ -7149,8 +7288,8 @@ static void __devexit ixgbe_remove(struct pci_dev *pdev)
 static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
                                                pci_channel_state_t state)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
 
        netif_device_detach(netdev);
 
@@ -7173,8 +7312,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev,
  */
 static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
        pci_ers_result_t result;
        int err;
 
@@ -7212,8 +7350,8 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev)
  */
 static void ixgbe_io_resume(struct pci_dev *pdev)
 {
-       struct net_device *netdev = pci_get_drvdata(pdev);
-       struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_adapter *adapter = pci_get_drvdata(pdev);
+       struct net_device *netdev = adapter->netdev;
 
        if (netif_running(netdev)) {
                if (ixgbe_up(adapter)) {
@@ -7278,6 +7416,7 @@ static void __exit ixgbe_exit_module(void)
        dca_unregister_notify(&dca_notifier);
 #endif
        pci_unregister_driver(&ixgbe_driver);
+       rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
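
The rcu_barrier() matters because ixgbe_get_stats64() above walks the
rings under rcu_read_lock(): ring memory must therefore be released via
call_rcu(), and the module text must stay mapped until every queued
callback has run.  A sketch of the free side this guards (the callback
and the rcu_head member are assumptions, not shown in this diff):

	/* assumed shape of the RCU-deferred ring free: */
	static void ixgbe_free_ring_rcu(struct rcu_head *head)
	{
		struct ixgbe_ring *ring =
			container_of(head, struct ixgbe_ring, rcu);
		kfree(ring);
	}

	/* instead of kfree(ring) while a reader may still hold it: */
	call_rcu(&ring->rcu, ixgbe_free_ring_rcu);
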
 
 #ifdef CONFIG_IXGBE_DCA