ixgbe: update version number for ixgbe
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 5dde7d63c3a3f628c8ea5af1a828a7a8cc185dfb..02541956744063359ffaa0120d24484b2e91de3c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -52,13 +52,14 @@ char ixgbe_driver_name[] = "ixgbe";
 static const char ixgbe_driver_string[] =
                              "Intel(R) 10 Gigabit PCI Express Network Driver";
 
-#define DRV_VERSION "2.0.84-k2"
+#define DRV_VERSION "3.0.12-k2"
 const char ixgbe_driver_version[] = DRV_VERSION;
 static char ixgbe_copyright[] = "Copyright (c) 1999-2010 Intel Corporation.";
 
 static const struct ixgbe_info *ixgbe_info_tbl[] = {
        [board_82598] = &ixgbe_82598_info,
        [board_82599] = &ixgbe_82599_info,
+       [board_X540] = &ixgbe_X540_info,
 };
 
 /* ixgbe_pci_tbl - PCI Device ID Table
@@ -112,6 +113,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = {
         board_82599 },
        {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE),
         board_82599 },
+       {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T),
+        board_82599 },
 
        /* required last entry */
        {0, }
@@ -560,6 +563,7 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction,
                IXGBE_WRITE_REG(hw, IXGBE_IVAR(index), ivar);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                if (direction == -1) {
                        /* other causes */
                        msix_vector |= IXGBE_IVAR_ALLOC_VAL;
@@ -589,14 +593,20 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 {
        u32 mask;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+               break;
+       default:
+               break;
        }
 }
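
The 82599 and X540 families track up to 64 queue causes, so the 64-bit qmask has to be split across the two 32-bit EICS_EX registers, as above. A minimal user-space sketch of the split (plain C; the queue numbers are arbitrary sample values):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t qmask = (1ULL << 40) | (1ULL << 3);   /* queues 3 and 40 */
	uint32_t lo = (uint32_t)(qmask & 0xFFFFFFFF);  /* -> IXGBE_EICS_EX(0) */
	uint32_t hi = (uint32_t)(qmask >> 32);         /* -> IXGBE_EICS_EX(1) */

	printf("lo=%#010x hi=%#010x\n", lo, hi);       /* lo=0x...008, hi=0x...100 */
	return 0;
}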
 
@@ -625,92 +635,166 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
 }
 
 /**
- * ixgbe_tx_xon_state - check the tx ring xon state
- * @adapter: the ixgbe adapter
- * @tx_ring: the corresponding tx_ring
+ * ixgbe_dcb_txq_to_tc - convert a reg index to a traffic class
+ * @adapter: driver private struct
+ * @reg_idx: reg idx of queue to query (0-127)
  *
- * If not in DCB mode, checks TFCS.TXOFF, otherwise, find out the
- * corresponding TC of this tx_ring when checking TFCS.
+ * Helper function to determine the traffic class for a particular
+ * register index.
  *
- * Returns : true if in xon state (currently not paused)
+ * Returns : a TC index in the range 0-7 (or 0-3), or -1 if the queue has no TC
  */
-static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
-                                     struct ixgbe_ring *tx_ring)
+u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 reg_idx)
 {
-       u32 txoff = IXGBE_TFCS_TXOFF;
+       int tc = -1;
+       int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-#ifdef CONFIG_IXGBE_DCB
-       if (adapter->dcb_cfg.pfc_mode_enable) {
-               int tc;
-               int reg_idx = tx_ring->reg_idx;
-               int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
+       /* if DCB is not enabled the queues have no TC */
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return tc;
+
+       /* check valid range */
+       if (reg_idx >= adapter->hw.mac.max_tx_queues)
+               return tc;
+
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               tc = reg_idx >> 2;
+               break;
+       default:
+               if (dcb_i != 4 && dcb_i != 8)
+                       break;
+
+               /* if VMDq is enabled the lowest order bits determine TC */
+               if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED |
+                                     IXGBE_FLAG_VMDQ_ENABLED)) {
+                       tc = reg_idx & (dcb_i - 1);
+                       break;
+               }
+
+               /*
+                * Convert the reg_idx into the correct TC. This bitmask
+                * targets the last full 32 ring traffic class and assigns
+                * it a value of 1. From there the rest of the rings are
+                * based on shifting the mask further up to include the
+                * reg_idx / 16 and then reg_idx / 8. It assumes dcb_i
+                * will only ever be 8 or 4 and that reg_idx will never
+                * be greater than 128. The code without the power of 2
+                * optimizations would be:
+                * (((reg_idx % 32) + 32) * dcb_i) >> (9 - reg_idx / 32)
+                */
+               tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
+               tc >>= 9 - (reg_idx >> 5);
+       }
+
+       return tc;
+}
+
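
The shift/mask form of the TC lookup is compact enough to deserve a sanity check against the readable formula quoted in the comment. A standalone sketch (user-space C, not kernel code; it assumes only the arithmetic above) that confirms the two forms agree for every reg_idx and both dcb_i values:

#include <assert.h>
#include <stdio.h>

int main(void)
{
	for (int dcb_i = 4; dcb_i <= 8; dcb_i += 4) {
		for (int reg_idx = 0; reg_idx < 128; reg_idx++) {
			/* optimized form from ixgbe_dcb_txq_to_tc */
			int tc = ((reg_idx & 0x1F) + 0x20) * dcb_i;
			tc >>= 9 - (reg_idx >> 5);

			/* readable form from the comment */
			int ref = (((reg_idx % 32) + 32) * dcb_i)
				  >> (9 - reg_idx / 32);

			assert(tc == ref);
		}
	}
	printf("mapping verified for dcb_i = 4 and 8\n");
	return 0;
}

For dcb_i = 8 this reproduces the layout the old comment spelled out by hand: reg_idx 96 lands in TC4, 104 in TC5, and so on.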
+static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_hw_stats *hwstats = &adapter->stats;
+       u32 data = 0;
+       u32 xoff[8] = {0};
+       int i;
 
-               switch (adapter->hw.mac.type) {
+       if ((hw->fc.current_mode == ixgbe_fc_full) ||
+           (hw->fc.current_mode == ixgbe_fc_rx_pause)) {
+               switch (hw->mac.type) {
                case ixgbe_mac_82598EB:
-                       tc = reg_idx >> 2;
-                       txoff = IXGBE_TFCS_TXOFF0;
+                       data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
                        break;
-               case ixgbe_mac_82599EB:
-                       tc = 0;
-                       txoff = IXGBE_TFCS_TXOFF;
-                       if (dcb_i == 8) {
-                               /* TC0, TC1 */
-                               tc = reg_idx >> 5;
-                               if (tc == 2) /* TC2, TC3 */
-                                       tc += (reg_idx - 64) >> 4;
-                               else if (tc == 3) /* TC4, TC5, TC6, TC7 */
-                                       tc += 1 + ((reg_idx - 96) >> 3);
-                       } else if (dcb_i == 4) {
-                               /* TC0, TC1 */
-                               tc = reg_idx >> 6;
-                               if (tc == 1) {
-                                       tc += (reg_idx - 64) >> 5;
-                                       if (tc == 2) /* TC2, TC3 */
-                                               tc += (reg_idx - 96) >> 4;
-                               }
-                       }
+               default:
+                       data = IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
+               }
+               hwstats->lxoffrxc += data;
+
+               /* refill credits (no tx hang) if we received xoff */
+               if (!data)
+                       return;
+
+               for (i = 0; i < adapter->num_tx_queues; i++)
+                       clear_bit(__IXGBE_HANG_CHECK_ARMED,
+                                 &adapter->tx_ring[i]->state);
+               return;
+       } else if (!(adapter->dcb_cfg.pfc_mode_enable))
+               return;
+
+       /* update stats for each tc, only valid with PFC enabled */
+       for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) {
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
+                       xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
                        break;
                default:
-                       tc = 0;
+                       xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
                }
-               txoff <<= tc;
+               hwstats->pxoffrxc[i] += xoff[i];
        }
-#endif
-       return IXGBE_READ_REG(&adapter->hw, IXGBE_TFCS) & txoff;
+
+       /* disarm tx queues that have received xoff frames */
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               u32 tc = ixgbe_dcb_txq_to_tc(adapter, tx_ring->reg_idx);
+
+               if (xoff[tc])
+                       clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
+       }
+}
+
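
Only queues whose traffic class actually received XOFF frames get their hang check disarmed by the loop above. A toy model of that selection (user-space C; the queue-to-TC table is a made-up stand-in for ixgbe_dcb_txq_to_tc):

#include <stdbool.h>
#include <stdio.h>

#define NUM_QUEUES 8

int main(void)
{
	unsigned int xoff[8] = { 0, 3, 0, 0, 0, 0, 0, 0 };  /* XOFF count per TC */
	unsigned int queue_tc[NUM_QUEUES] = { 0, 0, 1, 1, 2, 2, 3, 3 };
	bool armed[NUM_QUEUES];
	int i;

	for (i = 0; i < NUM_QUEUES; i++)
		armed[i] = true;

	/* disarm only the queues whose TC was paused */
	for (i = 0; i < NUM_QUEUES; i++)
		if (xoff[queue_tc[i]])
			armed[i] = false;

	for (i = 0; i < NUM_QUEUES; i++)
		printf("queue %d: %s\n", i, armed[i] ? "armed" : "disarmed");
	return 0;
}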
+static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring)
+{
+       return ring->tx_stats.completed;
 }
 
-static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
-                                      struct ixgbe_ring *tx_ring,
-                                      unsigned int eop)
+static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring)
 {
+       struct ixgbe_adapter *adapter = netdev_priv(ring->netdev);
        struct ixgbe_hw *hw = &adapter->hw;
 
-       /* Detect a transmit hang in hardware, this serializes the
-        * check with the clearing of time_stamp and movement of eop */
+       u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx));
+       u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx));
+
+       if (head != tail)
+               return (head < tail) ?
+                       tail - head : (tail + ring->count - head);
+
+       return 0;
+}
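
The head/tail arithmetic above counts descriptors the hardware has not processed yet, allowing for the ring wrapping past its end. The same math in isolation (user-space C; head, tail, and count are sample values):

#include <stdio.h>

static unsigned int tx_pending(unsigned int head, unsigned int tail,
			       unsigned int count)
{
	if (head == tail)
		return 0;	/* ring fully drained */
	return (head < tail) ? tail - head : tail + count - head;
}

int main(void)
{
	/* wrapped ring: 10 + 512 - 500 = 22 descriptors pending */
	printf("%u\n", tx_pending(500, 10, 512));
	return 0;
}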
+
+static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring)
+{
+       u32 tx_done = ixgbe_get_tx_completed(tx_ring);
+       u32 tx_done_old = tx_ring->tx_stats.tx_done_old;
+       u32 tx_pending = ixgbe_get_tx_pending(tx_ring);
+       bool ret = false;
+
        clear_check_for_tx_hang(tx_ring);
-       if (tx_ring->tx_buffer_info[eop].time_stamp &&
-           time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
-           ixgbe_tx_xon_state(adapter, tx_ring)) {
-               /* detected Tx unit hang */
-               union ixgbe_adv_tx_desc *tx_desc;
-               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
-               e_err(drv, "Detected Tx Unit Hang\n"
-                     "  Tx Queue             <%d>\n"
-                     "  TDH, TDT             <%x>, <%x>\n"
-                     "  next_to_use          <%x>\n"
-                     "  next_to_clean        <%x>\n"
-                     "tx_buffer_info[next_to_clean]\n"
-                     "  time_stamp           <%lx>\n"
-                     "  jiffies              <%lx>\n",
-                     tx_ring->queue_index,
-                     IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
-                     IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
-                     tx_ring->next_to_use, eop,
-                     tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
-               return true;
+
+       /*
+        * Check for a hung queue, but be thorough. This verifies
+        * that a transmit has been completed since the previous
+        * check AND there is at least one packet pending. The
+        * ARMED bit is set to indicate a potential hang. The
+        * bit is cleared if a pause frame is received to remove
+        * false hang detection due to PFC or 802.3x frames. By
+        * requiring this to fail twice we avoid races with
+        * pfc clearing the ARMED bit and conditions where we
+        * run the check_tx_hang logic with a transmit completion
+        * pending but without time to complete it yet.
+        */
+       if ((tx_done_old == tx_done) && tx_pending) {
+               /* make sure it is true for two checks in a row */
+               ret = test_and_set_bit(__IXGBE_HANG_CHECK_ARMED,
+                                      &tx_ring->state);
+       } else {
+               /* update completed stats and continue */
+               tx_ring->tx_stats.tx_done_old = tx_done;
+               /* reset the countdown */
+               clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state);
        }
 
-       return false;
+       return ret;
 }
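
The comment spells out a two-strike scheme: report a hang only when no transmit completed between two consecutive checks while work was still pending, with the XOFF path able to clear the armed state in between. A minimal user-space model of that state machine (plain C; the bool stands in for test_and_set_bit/clear_bit on the ring state):

#include <stdbool.h>
#include <stdio.h>

struct hang_state {
	unsigned long long tx_done_old;
	bool armed;
};

static bool check_tx_hang(struct hang_state *s,
			  unsigned long long tx_done, bool tx_pending)
{
	if (s->tx_done_old == tx_done && tx_pending) {
		bool was_armed = s->armed;	/* test_and_set_bit */
		s->armed = true;
		return was_armed;		/* hung only on the 2nd strike */
	}
	s->tx_done_old = tx_done;		/* progress was made */
	s->armed = false;			/* clear_bit: reset countdown */
	return false;
}

int main(void)
{
	struct hang_state s = { 0, false };

	printf("%d\n", check_tx_hang(&s, 5, true));	/* 0: progress seen */
	printf("%d\n", check_tx_hang(&s, 5, true));	/* 0: arms the check */
	printf("%d\n", check_tx_hang(&s, 5, true));	/* 1: second strike */
	return 0;
}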
 
 #define IXGBE_MAX_TXD_PWR       14
@@ -766,6 +850,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                                                         tx_buffer_info);
                }
 
+               tx_ring->tx_stats.completed++;
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
                eop_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
        }
@@ -778,11 +863,31 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        tx_ring->stats.bytes += total_bytes;
        u64_stats_update_end(&tx_ring->syncp);
 
-       if (check_for_tx_hang(tx_ring) &&
-           ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+       if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) {
+               /* schedule immediate reset if we believe we hung */
+               struct ixgbe_hw *hw = &adapter->hw;
+               tx_desc = IXGBE_TX_DESC_ADV(tx_ring, eop);
+               e_err(drv, "Detected Tx Unit Hang\n"
+                       "  Tx Queue             <%d>\n"
+                       "  TDH, TDT             <%x>, <%x>\n"
+                       "  next_to_use          <%x>\n"
+                       "  next_to_clean        <%x>\n"
+                       "tx_buffer_info[next_to_clean]\n"
+                       "  time_stamp           <%lx>\n"
+                       "  jiffies              <%lx>\n",
+                       tx_ring->queue_index,
+                       IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+                       IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
+                       tx_ring->next_to_use, eop,
+                       tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
+
+               netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
+
+               e_info(probe,
+                      "tx hang %d detected on queue %d, resetting adapter\n",
+                       adapter->tx_timeout_count + 1, tx_ring->queue_index);
+
                /* schedule immediate reset if we believe we hung */
-               e_info(probe, "tx hang %d detected, resetting "
-                      "adapter\n", adapter->tx_timeout_count + 1);
                ixgbe_tx_timeout(adapter->netdev);
 
                /* the adapter is about to reset, no point in enabling stuff */
@@ -822,6 +927,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
                rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
                rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
                           IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
@@ -855,6 +961,7 @@ static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
                IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
                txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
                txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
@@ -1128,43 +1235,39 @@ no_buffers:
        }
 }
 
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 {
-       return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-               IXGBE_RXDADV_RSCCNT_MASK) >>
-               IXGBE_RXDADV_RSCCNT_SHIFT;
+       /* HW will not DMA in data larger than the given buffer, even if it
+        * parses the (NFS, of course) header to be larger.  In that case, it
+        * fills the header buffer and spills the rest into the page.
+        */
+       u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+       u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+       if (hlen > IXGBE_RX_HDR_SIZE)
+               hlen = IXGBE_RX_HDR_SIZE;
+       return hlen;
 }
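
ixgbe_get_hlen masks the header-buffer length out of the descriptor's hdr_info word and clamps it to the driver's header buffer. A worked extraction on a sample value (user-space C; the 0x7FE0 mask, shift of 5, and 256-byte IXGBE_RX_HDR_SIZE mirror my reading of the ixgbe headers and should be treated as assumptions):

#include <stdint.h>
#include <stdio.h>

#define HDRBUFLEN_MASK  0x7FE0	/* assumed IXGBE_RXDADV_HDRBUFLEN_MASK */
#define HDRBUFLEN_SHIFT 5	/* assumed IXGBE_RXDADV_HDRBUFLEN_SHIFT */
#define RX_HDR_SIZE     256	/* assumed IXGBE_RX_HDR_SIZE */

int main(void)
{
	uint16_t hdr_info = 0x0C40;	/* sample descriptor word */
	uint16_t hlen = (hdr_info & HDRBUFLEN_MASK) >> HDRBUFLEN_SHIFT;

	if (hlen > RX_HDR_SIZE)
		hlen = RX_HDR_SIZE;	/* HW never DMAs past the buffer */
	printf("hlen = %u\n", hlen);	/* 0x0C40 >> 5 = 98 bytes */
	return 0;
}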
 
 /**
  * ixgbe_transform_rsc_queue - change rsc queue into a full packet
  * @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
  *
  * This function changes a queue full of hw rsc buffers into a completed
  * packet.  It uses the ->prev pointers to find the first packet and then
  * turns it into the frag list owner.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-                                                       u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
 {
        unsigned int frag_list_size = 0;
+       unsigned int skb_cnt = 1;
 
        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
-               *count += 1;
+               skb_cnt++;
        }
 
        skb_shinfo(skb)->frag_list = skb->next;
@@ -1172,17 +1275,18 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
+       IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
        return skb;
 }
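
The transform walks the skb ->prev links back to the first packet of the RSC chain, detaching each link and counting members along the way. The traversal shape on a plain linked list (user-space model; only the walk and the count are represented, none of the sk_buff length bookkeeping):

#include <stddef.h>
#include <stdio.h>

struct pkt {
	struct pkt *prev;
	int id;
};

static struct pkt *chain_head(struct pkt *last, unsigned int *cnt)
{
	*cnt = 1;
	while (last->prev) {
		struct pkt *prev = last->prev;

		last->prev = NULL;	/* detach as we walk back */
		last = prev;
		(*cnt)++;
	}
	return last;	/* first packet becomes the frag-list owner */
}

int main(void)
{
	struct pkt a = { NULL, 0 }, b = { &a, 1 }, c = { &b, 2 };
	unsigned int cnt;
	struct pkt *head = chain_head(&c, &cnt);

	printf("head=%d cnt=%u\n", head->id, cnt);	/* head=0 cnt=3 */
	return 0;
}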
 
-struct ixgbe_rsc_cb {
-       dma_addr_t dma;
-       bool delay_unmap;
-};
-
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+       return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+               IXGBE_RXDADV_RSCCNT_MASK);
+}
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
 {
@@ -1190,49 +1294,40 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
-       unsigned int i, rsc_count = 0;
-       u32 len, staterr;
-       u16 hdr_info;
-       bool cleaned = false;
-       int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       const int current_node = numa_node_id();
 #ifdef IXGBE_FCOE
        int ddp_bytes = 0;
 #endif /* IXGBE_FCOE */
+       u32 staterr;
+       u16 i;
+       u16 cleaned_count = 0;
+       bool pkt_is_rsc = false;
 
        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-       rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
-               if (*work_done >= work_to_do)
-                       break;
-               (*work_done)++;
 
                rmb(); /* read descriptor and rx_buffer_info after status DD */
-               if (ring_is_ps_enabled(rx_ring)) {
-                       hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
-                       len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-                       if ((len > IXGBE_RX_HDR_SIZE) ||
-                           (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
-                               len = IXGBE_RX_HDR_SIZE;
-               } else {
-                       len = le16_to_cpu(rx_desc->wb.upper.length);
-               }
 
-               cleaned = true;
+               rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
                skb = rx_buffer_info->skb;
-               prefetch(skb->data);
                rx_buffer_info->skb = NULL;
+               prefetch(skb->data);
 
+               if (ring_is_rsc_enabled(rx_ring))
+                       pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+
+               /* if this is an skb from a previous receive, DMA will be 0 */
                if (rx_buffer_info->dma) {
-                       if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-                           (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-                                (!(skb->prev))) {
+                       u16 hlen;
+                       if (pkt_is_rsc &&
+                           !(staterr & IXGBE_RXD_STAT_EOP) &&
+                           !skb->prev) {
                                /*
                                 * When HWRSC is enabled, delay unmapping
                                 * of the first packet. It carries the
@@ -1249,7 +1344,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                                 DMA_FROM_DEVICE);
                        }
                        rx_buffer_info->dma = 0;
-                       skb_put(skb, len);
+
+                       if (ring_is_ps_enabled(rx_ring)) {
+                               hlen = ixgbe_get_hlen(rx_desc);
+                               upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+                       } else {
+                               hlen = le16_to_cpu(rx_desc->wb.upper.length);
+                       }
+
+                       skb_put(skb, hlen);
+               } else {
+                       /* assume packet split since header is unmapped */
+                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                }
 
                if (upper_len) {
@@ -1263,11 +1369,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                           rx_buffer_info->page_offset,
                                           upper_len);
 
-                       if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
-                           (page_count(rx_buffer_info->page) != 1))
-                               rx_buffer_info->page = NULL;
-                       else
+                       if ((page_count(rx_buffer_info->page) == 1) &&
+                           (page_to_nid(rx_buffer_info->page) == current_node))
                                get_page(rx_buffer_info->page);
+                       else
+                               rx_buffer_info->page = NULL;
 
                        skb->len += upper_len;
                        skb->data_len += upper_len;
@@ -1282,10 +1388,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                prefetch(next_rxd);
                cleaned_count++;
 
-               if (ring_is_rsc_enabled(rx_ring))
-                       rsc_count = ixgbe_get_rsc_count(rx_desc);
-
-               if (rsc_count) {
+               if (pkt_is_rsc) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1293,31 +1396,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }
 
-               if (staterr & IXGBE_RXD_STAT_EOP) {
-                       if (skb->prev)
-                               skb = ixgbe_transform_rsc_queue(skb,
-                                               &(rx_ring->rx_stats.rsc_count));
-                       if (ring_is_rsc_enabled(rx_ring)) {
-                               if (IXGBE_RSC_CB(skb)->delay_unmap) {
-                                       dma_unmap_single(rx_ring->dev,
-                                                        IXGBE_RSC_CB(skb)->dma,
-                                                        rx_ring->rx_buf_len,
-                                                        DMA_FROM_DEVICE);
-                                       IXGBE_RSC_CB(skb)->dma = 0;
-                                       IXGBE_RSC_CB(skb)->delay_unmap = false;
-                               }
-                               if (ring_is_ps_enabled(rx_ring))
-                                       rx_ring->rx_stats.rsc_count +=
-                                                skb_shinfo(skb)->nr_frags;
-                               else
-                                       rx_ring->rx_stats.rsc_count++;
-                               rx_ring->rx_stats.rsc_flush++;
-                       }
-                       u64_stats_update_begin(&rx_ring->syncp);
-                       rx_ring->stats.packets++;
-                       rx_ring->stats.bytes += skb->len;
-                       u64_stats_update_end(&rx_ring->syncp);
-               } else {
+               if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        if (ring_is_ps_enabled(rx_ring)) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
@@ -1331,8 +1410,41 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        goto next_desc;
                }
 
+               if (skb->prev) {
+                       skb = ixgbe_transform_rsc_queue(skb);
+                       /* if we got here without RSC the packet is invalid */
+                       if (!pkt_is_rsc) {
+                               __pskb_trim(skb, 0);
+                               rx_buffer_info->skb = skb;
+                               goto next_desc;
+                       }
+               }
+
+               if (ring_is_rsc_enabled(rx_ring)) {
+                       if (IXGBE_RSC_CB(skb)->delay_unmap) {
+                               dma_unmap_single(rx_ring->dev,
+                                                IXGBE_RSC_CB(skb)->dma,
+                                                rx_ring->rx_buf_len,
+                                                DMA_FROM_DEVICE);
+                               IXGBE_RSC_CB(skb)->dma = 0;
+                               IXGBE_RSC_CB(skb)->delay_unmap = false;
+                       }
+               }
+               if (pkt_is_rsc) {
+                       if (ring_is_ps_enabled(rx_ring))
+                               rx_ring->rx_stats.rsc_count +=
+                                       skb_shinfo(skb)->nr_frags;
+                       else
+                               rx_ring->rx_stats.rsc_count +=
+                                       IXGBE_RSC_CB(skb)->skb_cnt;
+                       rx_ring->rx_stats.rsc_flush++;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
-                       dev_kfree_skb_irq(skb);
+                       /* trim packet back to size 0 and recycle it */
+                       __pskb_trim(skb, 0);
+                       rx_buffer_info->skb = skb;
                        goto next_desc;
                }
 
@@ -1356,6 +1468,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 next_desc:
                rx_desc->wb.upper.status_error = 0;
 
+               (*work_done)++;
+               if (*work_done >= work_to_do)
+                       break;
+
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1364,8 +1480,6 @@ next_desc:
 
                /* use prefetched values */
                rx_desc = next_rxd;
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
 
@@ -1392,8 +1506,10 @@ next_desc:
 
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
-
-       return cleaned;
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
 }
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1407,7 +1523,7 @@ static int ixgbe_clean_rxonly(struct napi_struct *, int);
 static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_q_vector *q_vector;
-       int i, j, q_vectors, v_idx, r_idx;
+       int i, q_vectors, v_idx, r_idx;
        u32 mask;
 
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
@@ -1423,8 +1539,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                       adapter->num_rx_queues);
 
                for (i = 0; i < q_vector->rxr_count; i++) {
-                       j = adapter->rx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 0, j, v_idx);
+                       u8 reg_idx = adapter->rx_ring[r_idx]->reg_idx;
+                       ixgbe_set_ivar(adapter, 0, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->rxr_idx,
                                              adapter->num_rx_queues,
                                              r_idx + 1);
@@ -1433,8 +1549,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                                       adapter->num_tx_queues);
 
                for (i = 0; i < q_vector->txr_count; i++) {
-                       j = adapter->tx_ring[r_idx]->reg_idx;
-                       ixgbe_set_ivar(adapter, 1, j, v_idx);
+                       u8 reg_idx = adapter->tx_ring[r_idx]->reg_idx;
+                       ixgbe_set_ivar(adapter, 1, reg_idx, v_idx);
                        r_idx = find_next_bit(q_vector->txr_idx,
                                              adapter->num_tx_queues,
                                              r_idx + 1);
@@ -1465,11 +1581,19 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                }
        }
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
-       else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
+               break;
+
+       default:
+               break;
+       }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 
        /* set up to autoclear timer, and the vectors */
@@ -1565,12 +1689,15 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
        int v_idx = q_vector->v_idx;
        u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
-       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                /*
-                * 82599 can support a value of zero, so allow it for
+                * 82599 and X540 can support a value of zero, so allow it for
                 * max interrupt rate, but there is an errata where it can
                 * not be zero with RSC
                 */
@@ -1583,6 +1710,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
                 * immediate assertion of the interrupt
                 */
                itr_reg |= IXGBE_EITR_CNT_WDIS;
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 }
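
On 82598 the EITR counter only resets when both 16-bit halves of the register are written, which is what itr_reg |= itr_reg << 16 accomplishes. In isolation (user-space C with an arbitrary interval value):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t itr_reg = 0x01F4;	/* interval in the low 16 bits */

	itr_reg |= itr_reg << 16;	/* duplicate into the high half */
	printf("%#010x\n", itr_reg);	/* 0x01f401f4 */
	return 0;
}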
@@ -1590,14 +1720,13 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
 static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
+       int i, r_idx;
        u32 new_itr;
        u8 current_itr, ret_itr;
-       int i, r_idx;
-       struct ixgbe_ring *rx_ring, *tx_ring;
 
        r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
        for (i = 0; i < q_vector->txr_count; i++) {
-               tx_ring = adapter->tx_ring[r_idx];
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->tx_itr,
                                           tx_ring->total_packets,
@@ -1612,7 +1741,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
        r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
        for (i = 0; i < q_vector->rxr_count; i++) {
-               rx_ring = adapter->rx_ring[r_idx];
+               struct ixgbe_ring *rx_ring = adapter->rx_ring[r_idx];
                ret_itr = ixgbe_update_itr(adapter, q_vector->eitr,
                                           q_vector->rx_itr,
                                           rx_ring->total_packets,
@@ -1643,7 +1772,7 @@ static void ixgbe_set_itr_msix(struct ixgbe_q_vector *q_vector)
 
        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+               new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
                /* save the algorithm value here, not the smoothed one */
                q_vector->eitr = new_itr;
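
The smoothing rewrite keeps the same 90/10 exponential weighting but folds two truncating divisions into one, so less is lost to integer rounding. A quick comparison (user-space C; the sample values are chosen to expose the difference):

#include <stdio.h>

int main(void)
{
	unsigned int eitr = 105, new_itr = 19;
	unsigned int old_way = ((eitr * 90) / 100) + ((new_itr * 10) / 100);
	unsigned int new_way = ((eitr * 9) + new_itr) / 10;

	/* exact value is 96.4; the old form truncates twice */
	printf("old=%u new=%u\n", old_way, new_way);	/* old=95 new=96 */
	return 0;
}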
@@ -1762,16 +1891,9 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_msg_task(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               ixgbe_check_fan_failure(adapter, eicr);
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               ixgbe_check_sfp_event(adapter, eicr);
-               adapter->interrupt_event = eicr;
-               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-                       schedule_work(&adapter->check_overtemp_task);
-
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int i;
@@ -1786,7 +1908,19 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
+               ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
        }
+
+       ixgbe_check_fan_failure(adapter, eicr);
+
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
@@ -1797,15 +1931,24 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
@@ -1814,15 +1957,24 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
@@ -2067,24 +2219,27 @@ static inline void map_vector_to_rxq(struct ixgbe_adapter *a, int v_idx,
                                     int r_idx)
 {
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+       struct ixgbe_ring *rx_ring = a->rx_ring[r_idx];
 
        set_bit(r_idx, q_vector->rxr_idx);
        q_vector->rxr_count++;
+       rx_ring->q_vector = q_vector;
 }
 
 static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
                                     int t_idx)
 {
        struct ixgbe_q_vector *q_vector = a->q_vector[v_idx];
+       struct ixgbe_ring *tx_ring = a->tx_ring[t_idx];
 
        set_bit(t_idx, q_vector->txr_idx);
        q_vector->txr_count++;
+       tx_ring->q_vector = q_vector;
 }
 
 /**
  * ixgbe_map_rings_to_vectors - Maps descriptor rings to vectors
  * @adapter: board private structure to initialize
- * @vectors: allotted vector count for descriptor rings
  *
  * This function maps descriptor rings to the queue-specific vectors
  * we were allotted through the MSI-X enabling code.  Ideally, we'd have
@@ -2092,9 +2247,9 @@ static inline void map_vector_to_txq(struct ixgbe_adapter *a, int v_idx,
  * group the rings as "efficiently" as possible.  You would add new
  * mapping configurations in here.
  **/
-static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
-                                     int vectors)
+static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter)
 {
+       int q_vectors;
        int v_start = 0;
        int rxr_idx = 0, txr_idx = 0;
        int rxr_remaining = adapter->num_rx_queues;
@@ -2107,11 +2262,13 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
        if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED))
                goto out;
 
+       q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+
        /*
         * The ideal configuration...
         * We have enough vectors to map one per queue.
         */
-       if (vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
+       if (q_vectors == adapter->num_rx_queues + adapter->num_tx_queues) {
                for (; rxr_idx < rxr_remaining; v_start++, rxr_idx++)
                        map_vector_to_rxq(adapter, v_start, rxr_idx);
 
@@ -2127,23 +2284,20 @@ static int ixgbe_map_rings_to_vectors(struct ixgbe_adapter *adapter,
         * multiple queues per vector.
         */
        /* Re-adjusting *qpv takes care of the remainder. */
-       for (i = v_start; i < vectors; i++) {
-               rqpv = DIV_ROUND_UP(rxr_remaining, vectors - i);
+       for (i = v_start; i < q_vectors; i++) {
+               rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - i);
                for (j = 0; j < rqpv; j++) {
                        map_vector_to_rxq(adapter, i, rxr_idx);
                        rxr_idx++;
                        rxr_remaining--;
                }
-       }
-       for (i = v_start; i < vectors; i++) {
-               tqpv = DIV_ROUND_UP(txr_remaining, vectors - i);
+               tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - i);
                for (j = 0; j < tqpv; j++) {
                        map_vector_to_txq(adapter, i, txr_idx);
                        txr_idx++;
                        txr_remaining--;
                }
        }
-
 out:
        return err;
 }
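
With fewer vectors than rings, the loop above recomputes DIV_ROUND_UP against the vectors still unassigned on each iteration, so any remainder is absorbed by the first vectors and every ring is placed. For example (user-space C; 10 rings over 4 vectors yields 3, 3, 2, 2):

#include <stdio.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	int remaining = 10, vectors = 4;
	int i;

	for (i = 0; i < vectors; i++) {
		int qpv = DIV_ROUND_UP(remaining, vectors - i);

		printf("vector %d gets %d rings\n", i, qpv);
		remaining -= qpv;
	}
	return 0;
}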
@@ -2165,32 +2319,36 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
        /* Decrement for Other and TCP Timer vectors */
        q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
-       /* Map the Tx/Rx rings to the vectors we were allotted. */
-       err = ixgbe_map_rings_to_vectors(adapter, q_vectors);
+       err = ixgbe_map_rings_to_vectors(adapter);
        if (err)
-               goto out;
+               return err;
 
-#define SET_HANDLER(_v) ((!(_v)->rxr_count) ? &ixgbe_msix_clean_tx : \
-                        (!(_v)->txr_count) ? &ixgbe_msix_clean_rx : \
-                        &ixgbe_msix_clean_many)
+#define SET_HANDLER(_v) (((_v)->rxr_count && (_v)->txr_count)        \
+                                         ? &ixgbe_msix_clean_many : \
+                         (_v)->rxr_count ? &ixgbe_msix_clean_rx   : \
+                         (_v)->txr_count ? &ixgbe_msix_clean_tx   : \
+                         NULL)
        for (vector = 0; vector < q_vectors; vector++) {
-               handler = SET_HANDLER(adapter->q_vector[vector]);
+               struct ixgbe_q_vector *q_vector = adapter->q_vector[vector];
+               handler = SET_HANDLER(q_vector);
 
                if (handler == &ixgbe_msix_clean_rx) {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
+                       sprintf(q_vector->name, "%s-%s-%d",
                                netdev->name, "rx", ri++);
                } else if (handler == &ixgbe_msix_clean_tx) {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
+                       sprintf(q_vector->name, "%s-%s-%d",
                                netdev->name, "tx", ti++);
-               } else {
-                       sprintf(adapter->name[vector], "%s-%s-%d",
+               } else if (handler == &ixgbe_msix_clean_many) {
+                       sprintf(q_vector->name, "%s-%s-%d",
                                netdev->name, "TxRx", ri++);
                        ti++;
+               } else {
+                       /* skip this unused q_vector */
+                       continue;
                }
-
                err = request_irq(adapter->msix_entries[vector].vector,
-                                 handler, 0, adapter->name[vector],
-                                 adapter->q_vector[vector]);
+                                 handler, 0, q_vector->name,
+                                 q_vector);
                if (err) {
                        e_err(probe, "request_irq failed for MSIX interrupt "
                              "Error: %d\n", err);
@@ -2198,9 +2356,9 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                }
        }
 
-       sprintf(adapter->name[vector], "%s:lsc", netdev->name);
+       sprintf(adapter->lsc_int_name, "%s:lsc", netdev->name);
        err = request_irq(adapter->msix_entries[vector].vector,
-                         ixgbe_msix_lsc, 0, adapter->name[vector], netdev);
+                         ixgbe_msix_lsc, 0, adapter->lsc_int_name, netdev);
        if (err) {
                e_err(probe, "request_irq for msix_lsc failed: %d\n", err);
                goto free_queue_irqs;
@@ -2216,17 +2374,16 @@ free_queue_irqs:
        pci_disable_msix(adapter->pdev);
        kfree(adapter->msix_entries);
        adapter->msix_entries = NULL;
-out:
        return err;
 }
 
 static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_q_vector *q_vector = adapter->q_vector[0];
-       u8 current_itr;
-       u32 new_itr = q_vector->eitr;
        struct ixgbe_ring *rx_ring = adapter->rx_ring[0];
        struct ixgbe_ring *tx_ring = adapter->tx_ring[0];
+       u32 new_itr = q_vector->eitr;
+       u8 current_itr;
 
        q_vector->tx_itr = ixgbe_update_itr(adapter, new_itr,
                                            q_vector->tx_itr,
@@ -2256,9 +2413,9 @@ static void ixgbe_set_itr(struct ixgbe_adapter *adapter)
 
        if (new_itr != q_vector->eitr) {
                /* do an exponential smoothing */
-               new_itr = ((q_vector->eitr * 90)/100) + ((new_itr * 10)/100);
+               new_itr = ((q_vector->eitr * 9) + new_itr)/10;
 
-               /* save the algorithm value here, not the smoothed one */
+               /* save the algorithm value here */
                q_vector->eitr = new_itr;
 
                ixgbe_write_eitr(q_vector);
@@ -2279,12 +2436,17 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                mask |= IXGBE_EIMS_GPI_SDP0;
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
                if (adapter->num_vfs)
                        mask |= IXGBE_EIMS_MAILBOX;
+               break;
+       default:
+               break;
        }
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2340,13 +2502,21 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
+       }
 
        ixgbe_check_fan_failure(adapter, eicr);
-       if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-           ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-               schedule_work(&adapter->check_overtemp_task);
 
        if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0]->total_packets = 0;
@@ -2439,14 +2609,20 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
                if (adapter->num_vfs > 32)
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2492,7 +2668,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        u64 tdba = ring->dma;
        int wait_loop = 10;
        u32 txdctl;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
        txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
@@ -2523,7 +2699,16 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        }
 
        /* reinitialize flowdirector state */
-       set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+           adapter->atr_sample_rate) {
+               ring->atr_sample_rate = adapter->atr_sample_rate;
+               ring->atr_count = 0;
+               set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+       } else {
+               ring->atr_sample_rate = 0;
+       }
+
+       clear_bit(__IXGBE_HANG_CHECK_ARMED, &ring->state);
 
        /* enable queue */
        txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2614,16 +2799,22 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring)
 {
        u32 srrctl;
-       int index;
-       struct ixgbe_ring_feature *feature = adapter->ring_feature;
+       u8 reg_idx = rx_ring->reg_idx;
 
-       index = rx_ring->reg_idx;
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               unsigned long mask;
-               mask = (unsigned long) feature[RING_F_RSS].mask;
-               index = index & mask;
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB: {
+               struct ixgbe_ring_feature *feature = adapter->ring_feature;
+               const int mask = feature[RING_F_RSS].mask;
+               reg_idx = reg_idx & mask;
        }
-       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+       default:
+               break;
+       }
+
+       srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx));
 
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
        srrctl &= ~IXGBE_SRRCTL_BSIZEPKT_MASK;
@@ -2646,7 +2837,7 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF;
        }
 
-       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(index), srrctl);
+       IXGBE_WRITE_REG(&adapter->hw, IXGBE_SRRCTL(reg_idx), srrctl);
 }
 
 static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
@@ -2715,18 +2906,35 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter)
        IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc);
 }
 
+/**
+ * ixgbe_clear_rscctl - disable RSC for the indicated ring
+ * @adapter: address of board private structure
+ * @ring: structure containing ring specific data
+ **/
+void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter,
+                        struct ixgbe_ring *ring)
+{
+       struct ixgbe_hw *hw = &adapter->hw;
+       u32 rscctrl;
+       u8 reg_idx = ring->reg_idx;
+
+       rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx));
+       rscctrl &= ~IXGBE_RSCCTL_RSCEN;
+       IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl);
+}
+
 /**
  * ixgbe_configure_rscctl - enable RSC for the indicated ring
  * @adapter:    address of board private structure
  * @index:      index of ring to set
  **/
-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
+void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rscctrl;
        int rx_buf_len;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
        if (!ring_is_rsc_enabled(ring))
                return;
@@ -2792,9 +3000,9 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter,
                                       struct ixgbe_ring *ring)
 {
        struct ixgbe_hw *hw = &adapter->hw;
-       int reg_idx = ring->reg_idx;
        int wait_loop = IXGBE_MAX_RX_DESC_POLL;
        u32 rxdctl;
+       u8 reg_idx = ring->reg_idx;
 
        /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */
        if (hw->mac.type == ixgbe_mac_82598EB &&
@@ -2818,7 +3026,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
        struct ixgbe_hw *hw = &adapter->hw;
        u64 rdba = ring->dma;
        u32 rxdctl;
-       u16 reg_idx = ring->reg_idx;
+       u8 reg_idx = ring->reg_idx;
 
        /* disable queue to avoid issues while updating state */
        rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx));
@@ -3003,7 +3211,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                }
 #endif /* IXGBE_FCOE */
        }
-
 }
 
 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -3026,6 +3233,7 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
                rdrxctl |= IXGBE_RDRXCTL_MVMEN;
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                /* Disable RSC for ACK packets */
                IXGBE_WRITE_REG(hw, IXGBE_RSCDBU,
                   (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU)));
@@ -3153,6 +3361,7 @@ static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3182,6 +3391,7 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter)
                IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl);
                break;
        case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                for (i = 0; i < adapter->num_rx_queues; i++) {
                        j = adapter->rx_ring[i]->reg_idx;
                        vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j));
@@ -3537,8 +3747,9 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter)
                case ixgbe_mac_82598EB:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE);
                        break;
-               default:
                case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+               default:
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF);
                        IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF);
                        break;
@@ -3583,7 +3794,7 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
                ixgbe_configure_msi_and_legacy(adapter);
 
        /* enable the optics */
-       if (hw->phy.multispeed_fiber)
+       if (hw->phy.multispeed_fiber && hw->mac.ops.enable_tx_laser)
                hw->mac.ops.enable_tx_laser(hw);
 
        clear_bit(__IXGBE_DOWN, &adapter->state);
@@ -3820,7 +4031,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
        struct ixgbe_hw *hw = &adapter->hw;
        u32 rxctrl;
        u32 txdctl;
-       int i, j;
+       int i;
        int num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
 
        /* signal that we are down to the interrupt handler */
@@ -3878,19 +4089,25 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
 
        /* disable transmits in the hardware now that interrupts are off */
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               j = adapter->tx_ring[i]->reg_idx;
-               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(j));
-               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(j),
+               u8 reg_idx = adapter->tx_ring[i]->reg_idx;
+               txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx));
+               IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx),
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }
        /* Disable the Tx DMA engine on 82599 */
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
+               break;
+       default:
+               break;
+       }
 
        /* power down the optics */
-       if (hw->phy.multispeed_fiber)
+       if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
                hw->mac.ops.disable_tx_laser(hw);
 
        /* clear n-tuple filters that are cached */
@@ -3951,6 +4168,8 @@ static void ixgbe_tx_timeout(struct net_device *netdev)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
 
+       adapter->tx_timeout_count++;
+
        /* Do the reset outside of interrupt context */
        schedule_work(&adapter->reset_task);
 }
@@ -3965,8 +4184,6 @@ static void ixgbe_reset_task(struct work_struct *work)
            test_bit(__IXGBE_RESETTING, &adapter->state))
                return;
 
-       adapter->tx_timeout_count++;
-
        ixgbe_dump(adapter);
        netdev_err(adapter->netdev, "Reset adapter\n");
        ixgbe_reinit_locked(adapter);
@@ -4216,19 +4433,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 {
        int i;
-       bool ret = false;
 
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i]->reg_idx = i;
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i]->reg_idx = i;
-               ret = true;
-       } else {
-               ret = false;
-       }
+       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+               return false;
 
-       return ret;
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i]->reg_idx = i;
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               adapter->tx_ring[i]->reg_idx = i;
+
+       return true;
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4245,71 +4459,67 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
        bool ret = false;
        int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       /* the number of queues is assumed to be symmetric */
-                       for (i = 0; i < dcb_i; i++) {
-                               adapter->rx_ring[i]->reg_idx = i << 3;
-                               adapter->tx_ring[i]->reg_idx = i << 2;
-                       }
-                       ret = true;
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       if (dcb_i == 8) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 32
-                                * Tx TC2 starts at: descriptor queue 64
-                                * Tx TC3 starts at: descriptor queue 80
-                                * Tx TC4 starts at: descriptor queue 96
-                                * Tx TC5 starts at: descriptor queue 104
-                                * Tx TC6 starts at: descriptor queue 112
-                                * Tx TC7 starts at: descriptor queue 120
-                                *
-                                * Rx TC0-TC7 are offset by 16 queues each
-                                */
-                               for (i = 0; i < 3; i++) {
-                                       adapter->tx_ring[i]->reg_idx = i << 5;
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < 5; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 2) << 4);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < dcb_i; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 8) << 3);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return false;
 
-                               ret = true;
-                       } else if (dcb_i == 4) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 64
-                                * Tx TC2 starts at: descriptor queue 96
-                                * Tx TC3 starts at: descriptor queue 112
-                                *
-                                * Rx TC0-TC3 are offset by 32 queues each
-                                */
-                               adapter->tx_ring[0]->reg_idx = 0;
-                               adapter->tx_ring[1]->reg_idx = 64;
-                               adapter->tx_ring[2]->reg_idx = 96;
-                               adapter->tx_ring[3]->reg_idx = 112;
-                               for (i = 0 ; i < dcb_i; i++)
-                                       adapter->rx_ring[i]->reg_idx = i << 5;
-
-                               ret = true;
-                       } else {
-                               ret = false;
+       /* the number of queues is assumed to be symmetric */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               for (i = 0; i < dcb_i; i++) {
+                       adapter->rx_ring[i]->reg_idx = i << 3;
+                       adapter->tx_ring[i]->reg_idx = i << 2;
+               }
+               ret = true;
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
+               if (dcb_i == 8) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 32
+                        * Tx TC2 starts at: descriptor queue 64
+                        * Tx TC3 starts at: descriptor queue 80
+                        * Tx TC4 starts at: descriptor queue 96
+                        * Tx TC5 starts at: descriptor queue 104
+                        * Tx TC6 starts at: descriptor queue 112
+                        * Tx TC7 starts at: descriptor queue 120
+                        *
+                        * Rx TC0-TC7 are offset by 16 queues each
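+                        *
+                        * The three loops below implement exactly this map:
+                        *   i = 0..2: tx reg_idx = i << 5        (0, 32, 64)
+                        *   i = 3..4: tx reg_idx = (i + 2) << 4  (80, 96)
+                        *   i = 5..7: tx reg_idx = (i + 8) << 3  (104, 112, 120)
+                        * with rx reg_idx = i << 4 throughout    (0, 16, ..., 112)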
+                        */
+                       for (i = 0; i < 3; i++) {
+                               adapter->tx_ring[i]->reg_idx = i << 5;
+                               adapter->rx_ring[i]->reg_idx = i << 4;
                        }
-               } else {
-                       ret = false;
+                       for ( ; i < 5; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       for ( ; i < dcb_i; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       ret = true;
+               } else if (dcb_i == 4) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 64
+                        * Tx TC2 starts at: descriptor queue 96
+                        * Tx TC3 starts at: descriptor queue 112
+                        *
+                        * Rx TC0-TC3 are offset by 32 queues each
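+                        *
+                        * The Tx offsets are set explicitly below, and
+                        * rx reg_idx = i << 5 yields 0, 32, 64, 96.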
+                        */
+                       adapter->tx_ring[0]->reg_idx = 0;
+                       adapter->tx_ring[1]->reg_idx = 64;
+                       adapter->tx_ring[2]->reg_idx = 96;
+                       adapter->tx_ring[3]->reg_idx = 112;
+                       for (i = 0; i < dcb_i; i++)
+                               adapter->rx_ring[i]->reg_idx = i << 5;
+                       ret = true;
                }
-       } else {
-               ret = false;
+               break;
+       default:
+               break;
        }
-
        return ret;
 }
 #endif
@@ -4349,55 +4559,55 @@ static inline bool ixgbe_cache_ring_fdir(struct ixgbe_adapter *adapter)
  */
 static inline bool ixgbe_cache_ring_fcoe(struct ixgbe_adapter *adapter)
 {
-       int i, fcoe_rx_i = 0, fcoe_tx_i = 0;
-       bool ret = false;
        struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
+       int i;
+       u8 fcoe_rx_i = 0, fcoe_tx_i = 0;
+
+       if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
+               return false;
 
-       if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 #ifdef CONFIG_IXGBE_DCB
-               if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-                       struct ixgbe_fcoe *fcoe = &adapter->fcoe;
+       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
+               struct ixgbe_fcoe *fcoe = &adapter->fcoe;
 
-                       ixgbe_cache_ring_dcb(adapter);
-                       /* find out queues in TC for FCoE */
-                       fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
-                       fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
-                       /*
-                        * In 82599, the number of Tx queues for each traffic
-                        * class for both 8-TC and 4-TC modes are:
-                        * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
-                        * 8 TCs:  32  32  16  16   8   8   8   8
-                        * 4 TCs:  64  64  32  32
-                        * We have max 8 queues for FCoE, where 8 the is
-                        * FCoE redirection table size. If TC for FCoE is
-                        * less than or equal to TC3, we have enough queues
-                        * to add max of 8 queues for FCoE, so we start FCoE
-                        * tx descriptor from the next one, i.e., reg_idx + 1.
-                        * If TC for FCoE is above TC3, implying 8 TC mode,
-                        * and we need 8 for FCoE, we have to take all queues
-                        * in that traffic class for FCoE.
-                        */
-                       if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
-                               fcoe_tx_i--;
-               }
+               ixgbe_cache_ring_dcb(adapter);
+               /* find out the queues in the TC used for FCoE */
+               fcoe_rx_i = adapter->rx_ring[fcoe->tc]->reg_idx + 1;
+               fcoe_tx_i = adapter->tx_ring[fcoe->tc]->reg_idx + 1;
+               /*
+                * In 82599, the number of Tx queues for each traffic
+                * class in 8-TC and 4-TC modes is:
+                * TCs  : TC0 TC1 TC2 TC3 TC4 TC5 TC6 TC7
+                * 8 TCs:  32  32  16  16   8   8   8   8
+                * 4 TCs:  64  64  32  32
+                * We have max 8 queues for FCoE, where 8 is the
+                * FCoE redirection table size. If the TC for FCoE is
+                * less than or equal to TC3, we have enough queues
+                * to add the max of 8 queues for FCoE, so we start the
+                * FCoE Tx queues at the next index, i.e., reg_idx + 1.
+                * If the TC for FCoE is above TC3, implying 8-TC mode,
+                * and we need 8 for FCoE, we have to take all queues
+                * in that traffic class for FCoE.
+                */
+               if ((f->indices == IXGBE_FCRETA_SIZE) && (fcoe->tc > 3))
+                       fcoe_tx_i--;
+       }
 #endif /* CONFIG_IXGBE_DCB */
-               if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-                       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
-                           (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
-                               ixgbe_cache_ring_fdir(adapter);
-                       else
-                               ixgbe_cache_ring_rss(adapter);
+       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
+               if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ||
+                   (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE))
+                       ixgbe_cache_ring_fdir(adapter);
+               else
+                       ixgbe_cache_ring_rss(adapter);
 
-                       fcoe_rx_i = f->mask;
-                       fcoe_tx_i = f->mask;
-               }
-               for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
-                       adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
-                       adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
-               }
-               ret = true;
+               fcoe_rx_i = f->mask;
+               fcoe_tx_i = f->mask;
        }
-       return ret;
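+       /* map the FCoE rings onto hw queues starting at fcoe_rx_i/fcoe_tx_i */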
+       for (i = 0; i < f->indices; i++, fcoe_rx_i++, fcoe_tx_i++) {
+               adapter->rx_ring[f->mask + i]->reg_idx = fcoe_rx_i;
+               adapter->tx_ring[f->mask + i]->reg_idx = fcoe_tx_i;
+       }
+       return true;
 }
 
 #endif /* IXGBE_FCOE */
@@ -4466,71 +4676,55 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
  **/
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
-       int i;
-       int rx_count;
-       int orig_node = adapter->node;
+       int rx = 0, tx = 0, nid = adapter->node;
 
-       for (i = 0; i < adapter->num_tx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->tx_ring[i];
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-                                   adapter->node);
+       if (nid < 0 || !node_online(nid))
+               nid = first_online_node;
+
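+       /* keep each ring's bookkeeping local to the adapter's NUMA node */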
+       for (; tx < adapter->num_tx_queues; tx++) {
+               struct ixgbe_ring *ring;
+
+               ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
                if (!ring)
-                       ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+                       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
-                       goto err_tx_ring_allocation;
+                       goto err_allocation;
                ring->count = adapter->tx_ring_count;
-               ring->queue_index = i;
+               ring->queue_index = tx;
+               ring->numa_node = nid;
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;
-               ring->numa_node = adapter->node;
 
-               adapter->tx_ring[i] = ring;
+               adapter->tx_ring[tx] = ring;
        }
 
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
+       for (; rx < adapter->num_rx_queues; rx++) {
+               struct ixgbe_ring *ring;
 
-       rx_count = adapter->rx_ring_count;
-       for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->rx_ring[i];
-               if (orig_node == -1) {
-                       int cur_node = next_online_node(adapter->node);
-                       if (cur_node == MAX_NUMNODES)
-                               cur_node = first_online_node;
-                       adapter->node = cur_node;
-               }
-               ring = kzalloc_node(sizeof(struct ixgbe_ring), GFP_KERNEL,
-                                   adapter->node);
+               ring = kzalloc_node(sizeof(*ring), GFP_KERNEL, nid);
                if (!ring)
-                       ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
+                       ring = kzalloc(sizeof(*ring), GFP_KERNEL);
                if (!ring)
-                       goto err_rx_ring_allocation;
-               ring->count = rx_count;
-               ring->queue_index = i;
+                       goto err_allocation;
+               ring->count = adapter->rx_ring_count;
+               ring->queue_index = rx;
+               ring->numa_node = nid;
                ring->dev = &adapter->pdev->dev;
                ring->netdev = adapter->netdev;
-               ring->numa_node = adapter->node;
 
-               adapter->rx_ring[i] = ring;
+               adapter->rx_ring[rx] = ring;
        }
 
-       /* Restore the adapter's original node */
-       adapter->node = orig_node;
-
        ixgbe_cache_ring_register(adapter);
 
        return 0;
 
-err_rx_ring_allocation:
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               kfree(adapter->tx_ring[i]);
-err_tx_ring_allocation:
+err_allocation:
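+       /* unwind: free every ring allocated before the failure */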
+       while (tx)
+               kfree(adapter->tx_ring[--tx]);
+
+       while (rx)
+               kfree(adapter->rx_ring[--rx]);
        return -ENOMEM;
 }
 
@@ -4870,11 +5064,14 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->ring_feature[RING_F_RSS].indices = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-       if (hw->mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-       } else if (hw->mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4903,6 +5100,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
+               break;
+       default:
+               break;
        }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -5385,10 +5585,17 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
        }
 
-       if (wufc && hw->mac.type == ixgbe_mac_82599EB)
-               pci_wake_from_d3(pdev, true);
-       else
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                pci_wake_from_d3(pdev, false);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
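+               /* arm D3 wake only when a wake-up filter (WUFC) bit is set */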
+               pci_wake_from_d3(pdev, !!wufc);
+               break;
+       default:
+               break;
+       }
 
        *enable_wake = !!wufc;
 
@@ -5507,17 +5714,18 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       hwstats->pxonrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
-                       hwstats->pxoffrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
-                       hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-               } else {
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
-                       hwstats->pxoffrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+                       break;
+               case ixgbe_mac_82599EB:
+               case ixgbe_mac_X540:
+                       hwstats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       break;
+               default:
+                       break;
                }
                hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5526,21 +5734,25 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        /* work around hardware counting issue */
        hwstats->gprc -= missed_rx;
 
+       ixgbe_update_xoff_received(adapter);
+
        /* 82598 hardware only has a 32 bit counter in the high register */
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               u64 tmp;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
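+               /* accumulate low halves; high registers are read only to clear */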
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
-                                               /* 4 high bits of GORC */
-               hwstats->gorc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
-                                               /* 4 high bits of GOTC */
-               hwstats->gotc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
-               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
                hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
                hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS);
 #ifdef IXGBE_FCOE
@@ -5551,12 +5763,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 #endif /* IXGBE_FCOE */
-       } else {
-               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       default:
+               break;
        }
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hwstats->bprc += bprc;
@@ -5792,17 +6001,27 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                if (!netif_carrier_ok(netdev)) {
                        bool flow_rx, flow_tx;
 
-                       if (hw->mac.type == ixgbe_mac_82599EB) {
-                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
-                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
-                       } else {
+                       switch (hw->mac.type) {
+                       case ixgbe_mac_82598EB: {
                                u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                                u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
                                flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
                                flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
                        }
+                               break;
+                       case ixgbe_mac_82599EB:
+                       case ixgbe_mac_X540: {
+                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+                       }
+                               break;
+                       default:
+                               flow_tx = false;
+                               flow_rx = false;
+                               break;
+                       }
 
                        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -6220,47 +6439,34 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-                     int queue, u32 tx_flags, __be16 protocol)
+                     u8 queue, u32 tx_flags, __be16 protocol)
 {
        struct ixgbe_atr_input atr_input;
-       struct tcphdr *th;
        struct iphdr *iph = ip_hdr(skb);
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       u16 vlan_id, src_port, dst_port, flex_bytes;
-       u32 src_ipv4_addr, dst_ipv4_addr;
-       u8 l4type = 0;
+       struct tcphdr *th;
+       u16 vlan_id;
 
-       /* Right now, we support IPv4 only */
-       if (protocol != htons(ETH_P_IP))
-               return;
-       /* check if we're UDP or TCP */
-       if (iph->protocol == IPPROTO_TCP) {
-               th = tcp_hdr(skb);
-               src_port = th->source;
-               dst_port = th->dest;
-               l4type |= IXGBE_ATR_L4TYPE_TCP;
-               /* l4type IPv4 type is 0, no need to assign */
-       } else {
-               /* Unsupported L4 header, just bail here */
+       /* Right now, we support IPv4 w/ TCP only */
+       if (protocol != htons(ETH_P_IP) ||
+           iph->protocol != IPPROTO_TCP)
                return;
-       }
 
        memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
 
        vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
                   IXGBE_TX_FLAGS_VLAN_SHIFT;
-       src_ipv4_addr = iph->saddr;
-       dst_ipv4_addr = iph->daddr;
-       flex_bytes = eth->h_proto;
+
+       th = tcp_hdr(skb);
 
        ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-       ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
-       ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
-       ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
-       ixgbe_atr_set_l4type_82599(&atr_input, l4type);
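+       /* ports are swapped for the same reason the addresses are below */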
+       ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+       ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+       ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+       ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
        /* src and dst are inverted, think how the receiver sees them */
-       ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
-       ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+       ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+       ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
 
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
@@ -6878,8 +7084,14 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
 
        /* Make it possible for the adapter to be woken up via WOL */
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
+       case ixgbe_mac_X540:
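+               /* WUS is write-1-to-clear; writing ~0 clears stale wake status */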
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0);
+               break;
+       default:
+               break;
+       }
 
        /*
         * If there is a fan on this device and it has failed log the
@@ -6988,7 +7200,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
        }
 
        /* power down the optics */
-       if (hw->phy.multispeed_fiber)
+       if (hw->phy.multispeed_fiber && hw->mac.ops.disable_tx_laser)
                hw->mac.ops.disable_tx_laser(hw);
 
        init_timer(&adapter->watchdog_timer);
@@ -7003,6 +7215,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
 
        switch (pdev->device) {
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               /* All except this subdevice support WOL */
+               if (pdev->subsystem_device ==
+                   IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+                       adapter->wol = 0;
+                       break;
+               }
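+               /* fall through: remaining subdevices use the same WOL set as KX4 */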
        case IXGBE_DEV_ID_82599_KX4:
                adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
                                IXGBE_WUFC_MC | IXGBE_WUFC_BC);