bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - drivers/net/ixgbe/ixgbe_main.c
ixgbe: cleanup unnecessary return value in ixgbe_cache_ring_rss
[net-next-2.6.git] / drivers / net / ixgbe / ixgbe_main.c
index b798501500e6f039540c6c8ef2cd43b52bd4335a..5f7929f52fe4ba635b8dce11a652524283253aec 100644 (file)
@@ -589,14 +589,19 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
 {
        u32 mask;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS, mask);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
                mask = (qmask & 0xFFFFFFFF);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask);
                mask = (qmask >> 32);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(1), mask);
+               break;
+       default:
+               break;
        }
 }
 
@@ -672,6 +677,7 @@ static inline bool ixgbe_tx_xon_state(struct ixgbe_adapter *adapter,
                        break;
                default:
                        tc = 0;
+                       break;
                }
                txoff <<= tc;
        }
@@ -735,8 +741,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        struct ixgbe_adapter *adapter = q_vector->adapter;
        union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
        struct ixgbe_tx_buffer *tx_buffer_info;
-       unsigned int i, eop, count = 0;
        unsigned int total_bytes = 0, total_packets = 0;
+       u16 i, eop, count = 0;
 
        i = tx_ring->next_to_clean;
        eop = tx_ring->tx_buffer_info[i].next_to_watch;
@@ -771,6 +777,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
        }
 
        tx_ring->next_to_clean = i;
+       tx_ring->total_bytes += total_bytes;
+       tx_ring->total_packets += total_packets;
+       u64_stats_update_begin(&tx_ring->syncp);
+       tx_ring->stats.packets += total_packets;
+       tx_ring->stats.bytes += total_bytes;
+       u64_stats_update_end(&tx_ring->syncp);
+
+       if (check_for_tx_hang(tx_ring) &&
+           ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+               /* schedule immediate reset if we believe we hung */
+               e_info(probe, "tx hang %d detected, resetting "
+                      "adapter\n", adapter->tx_timeout_count + 1);
+               ixgbe_tx_timeout(adapter->netdev);
+
+               /* the adapter is about to reset, no point in enabling stuff */
+               return true;
+       }
 
 #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
        if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
@@ -786,24 +809,6 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                }
        }
 
-       if (check_for_tx_hang(tx_ring) &&
-           ixgbe_check_tx_hang(adapter, tx_ring, i)) {
-               /* schedule immediate reset if we believe we hung */
-               e_info(probe, "tx hang %d detected, resetting "
-                      "adapter\n", adapter->tx_timeout_count + 1);
-               ixgbe_tx_timeout(adapter->netdev);
-       }
-
-       /* re-arm the interrupt */
-       if (count >= tx_ring->work_limit)
-               ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
-       tx_ring->total_bytes += total_bytes;
-       tx_ring->total_packets += total_packets;
-       u64_stats_update_begin(&tx_ring->syncp);
-       tx_ring->stats.packets += total_packets;
-       tx_ring->stats.bytes += total_bytes;
-       u64_stats_update_end(&tx_ring->syncp);
        return count < tx_ring->work_limit;
 }
 
@@ -1129,43 +1134,39 @@ no_buffers:
        }
 }
 
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
-{
-       return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
-}
-
-static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
 {
-       return (le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
-               IXGBE_RXDADV_RSCCNT_MASK) >>
-               IXGBE_RXDADV_RSCCNT_SHIFT;
+       /* HW will not DMA in data larger than the given buffer, even if it
+        * parses the (NFS, of course) header to be larger.  In that case, it
+        * fills the header buffer and spills the rest into the page.
+        */
+       u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+       u16 hlen = (hdr_info &  IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+                   IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+       if (hlen > IXGBE_RX_HDR_SIZE)
+               hlen = IXGBE_RX_HDR_SIZE;
+       return hlen;
 }
 
 /**
  * ixgbe_transform_rsc_queue - change rsc queue into a full packet
  * @skb: pointer to the last skb in the rsc queue
- * @count: pointer to number of packets coalesced in this context
  *
  * This function changes a queue full of hw rsc buffers into a completed
  * packet.  It uses the ->prev pointers to find the first packet and then
  * turns it into the frag list owner.
  **/
-static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
-                                                       u64 *count)
+static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb)
 {
        unsigned int frag_list_size = 0;
+       unsigned int skb_cnt = 1;
 
        while (skb->prev) {
                struct sk_buff *prev = skb->prev;
                frag_list_size += skb->len;
                skb->prev = NULL;
                skb = prev;
-               *count += 1;
+               skb_cnt++;
        }
 
        skb_shinfo(skb)->frag_list = skb->next;
@@ -1173,17 +1174,18 @@ static inline struct sk_buff *ixgbe_transform_rsc_queue(struct sk_buff *skb,
        skb->len += frag_list_size;
        skb->data_len += frag_list_size;
        skb->truesize += frag_list_size;
+       IXGBE_RSC_CB(skb)->skb_cnt = skb_cnt;
+
        return skb;
 }
 
-struct ixgbe_rsc_cb {
-       dma_addr_t dma;
-       bool delay_unmap;
-};
-
-#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
+static inline bool ixgbe_get_rsc_state(union ixgbe_adv_rx_desc *rx_desc)
+{
+       return !!(le32_to_cpu(rx_desc->wb.lower.lo_dword.data) &
+               IXGBE_RXDADV_RSCCNT_MASK);
+}
 
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               struct ixgbe_ring *rx_ring,
                               int *work_done, int work_to_do)
 {
@@ -1191,49 +1193,40 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
-       unsigned int i, rsc_count = 0;
-       u32 len, staterr;
-       u16 hdr_info;
-       bool cleaned = false;
-       int cleaned_count = 0;
        unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+       const int current_node = numa_node_id();
 #ifdef IXGBE_FCOE
        int ddp_bytes = 0;
 #endif /* IXGBE_FCOE */
+       u32 staterr;
+       u16 i;
+       u16 cleaned_count = 0;
+       bool pkt_is_rsc = false;
 
        i = rx_ring->next_to_clean;
        rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
        staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
-       rx_buffer_info = &rx_ring->rx_buffer_info[i];
 
        while (staterr & IXGBE_RXD_STAT_DD) {
                u32 upper_len = 0;
-               if (*work_done >= work_to_do)
-                       break;
-               (*work_done)++;
 
                rmb(); /* read descriptor and rx_buffer_info after status DD */
-               if (ring_is_ps_enabled(rx_ring)) {
-                       hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
-                       len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
-                              IXGBE_RXDADV_HDRBUFLEN_SHIFT;
-                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
-                       if ((len > IXGBE_RX_HDR_SIZE) ||
-                           (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
-                               len = IXGBE_RX_HDR_SIZE;
-               } else {
-                       len = le16_to_cpu(rx_desc->wb.upper.length);
-               }
 
-               cleaned = true;
+               rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
                skb = rx_buffer_info->skb;
-               prefetch(skb->data);
                rx_buffer_info->skb = NULL;
+               prefetch(skb->data);
 
+               if (ring_is_rsc_enabled(rx_ring))
+                       pkt_is_rsc = ixgbe_get_rsc_state(rx_desc);
+
+               /* if this is a skb from previous receive DMA will be 0 */
                if (rx_buffer_info->dma) {
-                       if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
-                           (!(staterr & IXGBE_RXD_STAT_EOP)) &&
-                                (!(skb->prev))) {
+                       u16 hlen;
+                       if (pkt_is_rsc &&
+                           !(staterr & IXGBE_RXD_STAT_EOP) &&
+                           !skb->prev) {
                                /*
                                 * When HWRSC is enabled, delay unmapping
                                 * of the first packet. It carries the
@@ -1250,7 +1243,18 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                                 DMA_FROM_DEVICE);
                        }
                        rx_buffer_info->dma = 0;
-                       skb_put(skb, len);
+
+                       if (ring_is_ps_enabled(rx_ring)) {
+                               hlen = ixgbe_get_hlen(rx_desc);
+                               upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+                       } else {
+                               hlen = le16_to_cpu(rx_desc->wb.upper.length);
+                       }
+
+                       skb_put(skb, hlen);
+               } else {
+                       /* assume packet split since header is unmapped */
+                       upper_len = le16_to_cpu(rx_desc->wb.upper.length);
                }
 
                if (upper_len) {
@@ -1264,11 +1268,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                           rx_buffer_info->page_offset,
                                           upper_len);
 
-                       if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
-                           (page_count(rx_buffer_info->page) != 1))
-                               rx_buffer_info->page = NULL;
-                       else
+                       if ((page_count(rx_buffer_info->page) == 1) &&
+                           (page_to_nid(rx_buffer_info->page) == current_node))
                                get_page(rx_buffer_info->page);
+                       else
+                               rx_buffer_info->page = NULL;
 
                        skb->len += upper_len;
                        skb->data_len += upper_len;
@@ -1283,10 +1287,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                prefetch(next_rxd);
                cleaned_count++;
 
-               if (ring_is_rsc_enabled(rx_ring))
-                       rsc_count = ixgbe_get_rsc_count(rx_desc);
-
-               if (rsc_count) {
+               if (pkt_is_rsc) {
                        u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
                                     IXGBE_RXDADV_NEXTP_SHIFT;
                        next_buffer = &rx_ring->rx_buffer_info[nextp];
@@ -1294,31 +1295,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        next_buffer = &rx_ring->rx_buffer_info[i];
                }
 
-               if (staterr & IXGBE_RXD_STAT_EOP) {
-                       if (skb->prev)
-                               skb = ixgbe_transform_rsc_queue(skb,
-                                               &(rx_ring->rx_stats.rsc_count));
-                       if (ring_is_rsc_enabled(rx_ring)) {
-                               if (IXGBE_RSC_CB(skb)->delay_unmap) {
-                                       dma_unmap_single(rx_ring->dev,
-                                                        IXGBE_RSC_CB(skb)->dma,
-                                                        rx_ring->rx_buf_len,
-                                                        DMA_FROM_DEVICE);
-                                       IXGBE_RSC_CB(skb)->dma = 0;
-                                       IXGBE_RSC_CB(skb)->delay_unmap = false;
-                               }
-                               if (ring_is_ps_enabled(rx_ring))
-                                       rx_ring->rx_stats.rsc_count +=
-                                                skb_shinfo(skb)->nr_frags;
-                               else
-                                       rx_ring->rx_stats.rsc_count++;
-                               rx_ring->rx_stats.rsc_flush++;
-                       }
-                       u64_stats_update_begin(&rx_ring->syncp);
-                       rx_ring->stats.packets++;
-                       rx_ring->stats.bytes += skb->len;
-                       u64_stats_update_end(&rx_ring->syncp);
-               } else {
+               if (!(staterr & IXGBE_RXD_STAT_EOP)) {
                        if (ring_is_ps_enabled(rx_ring)) {
                                rx_buffer_info->skb = next_buffer->skb;
                                rx_buffer_info->dma = next_buffer->dma;
@@ -1332,8 +1309,41 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                        goto next_desc;
                }
 
+               if (skb->prev) {
+                       skb = ixgbe_transform_rsc_queue(skb);
+                       /* if we got here without RSC the packet is invalid */
+                       if (!pkt_is_rsc) {
+                               __pskb_trim(skb, 0);
+                               rx_buffer_info->skb = skb;
+                               goto next_desc;
+                       }
+               }
+
+               if (ring_is_rsc_enabled(rx_ring)) {
+                       if (IXGBE_RSC_CB(skb)->delay_unmap) {
+                               dma_unmap_single(rx_ring->dev,
+                                                IXGBE_RSC_CB(skb)->dma,
+                                                rx_ring->rx_buf_len,
+                                                DMA_FROM_DEVICE);
+                               IXGBE_RSC_CB(skb)->dma = 0;
+                               IXGBE_RSC_CB(skb)->delay_unmap = false;
+                       }
+               }
+               if (pkt_is_rsc) {
+                       if (ring_is_ps_enabled(rx_ring))
+                               rx_ring->rx_stats.rsc_count +=
+                                       skb_shinfo(skb)->nr_frags;
+                       else
+                               rx_ring->rx_stats.rsc_count +=
+                                       IXGBE_RSC_CB(skb)->skb_cnt;
+                       rx_ring->rx_stats.rsc_flush++;
+               }
+
+               /* ERR_MASK will only have valid bits if EOP set */
                if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
-                       dev_kfree_skb_irq(skb);
+                       /* trim packet back to size 0 and recycle it */
+                       __pskb_trim(skb, 0);
+                       rx_buffer_info->skb = skb;
                        goto next_desc;
                }
 
@@ -1357,6 +1367,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
 next_desc:
                rx_desc->wb.upper.status_error = 0;
 
+               (*work_done)++;
+               if (*work_done >= work_to_do)
+                       break;
+
                /* return some buffers to hardware, one at a time is too slow */
                if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
                        ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
@@ -1365,8 +1379,6 @@ next_desc:
 
                /* use prefetched values */
                rx_desc = next_rxd;
-               rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
                staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
        }
 
@@ -1393,8 +1405,10 @@ next_desc:
 
        rx_ring->total_packets += total_rx_packets;
        rx_ring->total_bytes += total_rx_bytes;
-
-       return cleaned;
+       u64_stats_update_begin(&rx_ring->syncp);
+       rx_ring->stats.packets += total_rx_packets;
+       rx_ring->stats.bytes += total_rx_bytes;
+       u64_stats_update_end(&rx_ring->syncp);
 }
 
 static int ixgbe_clean_rxonly(struct napi_struct *, int);
@@ -1466,11 +1480,18 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter)
                }
        }
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB)
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                ixgbe_set_ivar(adapter, -1, IXGBE_IVAR_OTHER_CAUSES_INDEX,
                               v_idx);
-       else if (adapter->hw.mac.type == ixgbe_mac_82599EB)
+               break;
+       case ixgbe_mac_82599EB:
                ixgbe_set_ivar(adapter, -1, 1, v_idx);
+               break;
+
+       default:
+               break;
+       }
        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITR(v_idx), 1950);
 
        /* set up to autoclear timer, and the vectors */
@@ -1566,10 +1587,12 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
        int v_idx = q_vector->v_idx;
        u32 itr_reg = EITR_INTS_PER_SEC_TO_REG(q_vector->eitr);
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                /* must write high and low 16 bits to reset counter */
                itr_reg |= (itr_reg << 16);
-       } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
                /*
                 * 82599 can support a value of zero, so allow it for
                 * max interrupt rate, but there is an errata where it can
@@ -1584,6 +1607,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector)
                 * immediate assertion of the interrupt
                 */
                itr_reg |= IXGBE_EITR_CNT_WDIS;
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_REG(hw, IXGBE_EITR(v_idx), itr_reg);
 }
@@ -1712,17 +1738,18 @@ static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr)
 {
        struct ixgbe_hw *hw = &adapter->hw;
 
+       if (eicr & IXGBE_EICR_GPI_SDP2) {
+               /* Clear the interrupt */
+               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       schedule_work(&adapter->sfp_config_module_task);
+       }
+
        if (eicr & IXGBE_EICR_GPI_SDP1) {
                /* Clear the interrupt */
                IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
-               schedule_work(&adapter->multispeed_fiber_task);
-       } else if (eicr & IXGBE_EICR_GPI_SDP2) {
-               /* Clear the interrupt */
-               IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
-               schedule_work(&adapter->sfp_config_module_task);
-       } else {
-               /* Interrupt isn't for us... */
-               return;
+               if (!test_bit(__IXGBE_DOWN, &adapter->state))
+                       schedule_work(&adapter->multispeed_fiber_task);
        }
 }
 
@@ -1762,16 +1789,8 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
        if (eicr & IXGBE_EICR_MAILBOX)
                ixgbe_msg_task(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82598EB)
-               ixgbe_check_fan_failure(adapter, eicr);
-
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               ixgbe_check_sfp_event(adapter, eicr);
-               adapter->interrupt_event = eicr;
-               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-                       schedule_work(&adapter->check_overtemp_task);
-
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
                /* Handle Flow Director Full threshold interrupt */
                if (eicr & IXGBE_EICR_FLOW_DIR) {
                        int i;
@@ -1786,7 +1805,19 @@ static irqreturn_t ixgbe_msix_lsc(int irq, void *data)
                                        schedule_work(&adapter->fdir_reinit_task);
                        }
                }
+               ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
        }
+
+       ixgbe_check_fan_failure(adapter, eicr);
+
        if (!test_bit(__IXGBE_DOWN, &adapter->state))
                IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_OTHER);
 
@@ -1797,15 +1828,23 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter,
                                           u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMS, mask);
+               break;
+       case ixgbe_mac_82599EB:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMS_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
@@ -1814,15 +1853,23 @@ static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter,
                                            u64 qmask)
 {
        u32 mask;
+       struct ixgbe_hw *hw = &adapter->hw;
 
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                mask = (IXGBE_EIMS_RTX_QUEUE & qmask);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, mask);
-       } else {
+               IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask);
+               break;
+       case ixgbe_mac_82599EB:
                mask = (qmask & 0xFFFFFFFF);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask);
                mask = (qmask >> 32);
-               IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), mask);
+               if (mask)
+                       IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask);
+               break;
+       default:
+               break;
        }
        /* skip the flush */
 }
@@ -2182,9 +2229,11 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter)
                } else if (handler == &ixgbe_msix_clean_tx) {
                        sprintf(adapter->name[vector], "%s-%s-%d",
                                netdev->name, "tx", ti++);
-               } else
+               } else {
                        sprintf(adapter->name[vector], "%s-%s-%d",
-                               netdev->name, "TxRx", vector);
+                               netdev->name, "TxRx", ri++);
+                       ti++;
+               }
 
                err = request_irq(adapter->msix_entries[vector].vector,
                                  handler, 0, adapter->name[vector],
@@ -2277,12 +2326,16 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues,
                mask |= IXGBE_EIMS_GPI_SDP0;
        if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE)
                mask |= IXGBE_EIMS_GPI_SDP1;
-       if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82599EB:
                mask |= IXGBE_EIMS_ECC;
                mask |= IXGBE_EIMS_GPI_SDP1;
                mask |= IXGBE_EIMS_GPI_SDP2;
                if (adapter->num_vfs)
                        mask |= IXGBE_EIMS_MAILBOX;
+               break;
+       default:
+               break;
        }
        if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE ||
            adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)
@@ -2338,13 +2391,20 @@ static irqreturn_t ixgbe_intr(int irq, void *data)
        if (eicr & IXGBE_EICR_LSC)
                ixgbe_check_lsc(adapter);
 
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
                ixgbe_check_sfp_event(adapter, eicr);
+               if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
+                   ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC))) {
+                       adapter->interrupt_event = eicr;
+                       schedule_work(&adapter->check_overtemp_task);
+               }
+               break;
+       default:
+               break;
+       }
 
        ixgbe_check_fan_failure(adapter, eicr);
-       if ((adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) &&
-           ((eicr & IXGBE_EICR_GPI_SDP0) || (eicr & IXGBE_EICR_LSC)))
-               schedule_work(&adapter->check_overtemp_task);
 
        if (napi_schedule_prep(&(q_vector->napi))) {
                adapter->tx_ring[0]->total_packets = 0;
@@ -2437,14 +2497,19 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter)
  **/
 static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter)
 {
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ~0);
-       } else {
+               break;
+       case ixgbe_mac_82599EB:
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(0), ~0);
                IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0);
                if (adapter->num_vfs > 32)
                        IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, 0);
+               break;
+       default:
+               break;
        }
        IXGBE_WRITE_FLUSH(&adapter->hw);
        if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) {
@@ -2521,7 +2586,14 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
        }
 
        /* reinitialize flowdirector state */
-       set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+       if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+           adapter->atr_sample_rate) {
+               ring->atr_sample_rate = adapter->atr_sample_rate;
+               ring->atr_count = 0;
+               set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+       } else {
+               ring->atr_sample_rate = 0;
+       }
 
        /* enable queue */
        txdctl |= IXGBE_TXDCTL_ENABLE;
@@ -2612,15 +2684,20 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter,
                                   struct ixgbe_ring *rx_ring)
 {
        u32 srrctl;
-       int index;
-       struct ixgbe_ring_feature *feature = adapter->ring_feature;
+       int index = rx_ring->reg_idx;
 
-       index = rx_ring->reg_idx;
-       if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-               unsigned long mask;
-               mask = (unsigned long) feature[RING_F_RSS].mask;
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB: {
+               struct ixgbe_ring_feature *feature = adapter->ring_feature;
+               const int mask = feature[RING_F_RSS].mask;
                index = index & mask;
        }
+               break;
+       case ixgbe_mac_82599EB:
+       default:
+               break;
+       }
+
        srrctl = IXGBE_READ_REG(&adapter->hw, IXGBE_SRRCTL(index));
 
        srrctl &= ~IXGBE_SRRCTL_BSIZEHDR_MASK;
@@ -3001,7 +3078,6 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
                }
 #endif /* IXGBE_FCOE */
        }
-
 }
 
 static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter)
@@ -3587,6 +3663,14 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
        clear_bit(__IXGBE_DOWN, &adapter->state);
        ixgbe_napi_enable_all(adapter);
 
+       if (ixgbe_is_sfp(hw)) {
+               ixgbe_sfp_link_config(adapter);
+       } else {
+               err = ixgbe_non_sfp_link_config(hw);
+               if (err)
+                       e_err(probe, "link_config FAILED %d\n", err);
+       }
+
        /* clear any pending interrupts, may auto mask */
        IXGBE_READ_REG(hw, IXGBE_EICR);
        ixgbe_irq_enable(adapter, true, true);
@@ -3609,26 +3693,8 @@ static int ixgbe_up_complete(struct ixgbe_adapter *adapter)
         * If we're not hot-pluggable SFP+, we just need to configure link
         * and bring it up.
         */
-       if (hw->phy.type == ixgbe_phy_unknown) {
-               err = hw->phy.ops.identify(hw);
-               if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
-                       /*
-                        * Take the device down and schedule the sfp tasklet
-                        * which will unregister_netdev and log it.
-                        */
-                       ixgbe_down(adapter);
-                       schedule_work(&adapter->sfp_config_module_task);
-                       return err;
-               }
-       }
-
-       if (ixgbe_is_sfp(hw)) {
-               ixgbe_sfp_link_config(adapter);
-       } else {
-               err = ixgbe_non_sfp_link_config(hw);
-               if (err)
-                       e_err(probe, "link_config FAILED %d\n", err);
-       }
+       if (hw->phy.type == ixgbe_phy_unknown)
+               schedule_work(&adapter->sfp_config_module_task);
 
        /* enable transmits */
        netif_tx_start_all_queues(adapter->netdev);
@@ -3892,10 +3958,15 @@ void ixgbe_down(struct ixgbe_adapter *adapter)
                                (txdctl & ~IXGBE_TXDCTL_ENABLE));
        }
        /* Disable the Tx DMA engine on 82599 */
-       if (hw->mac.type == ixgbe_mac_82599EB)
+       switch (hw->mac.type) {
+       case ixgbe_mac_82599EB:
                IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL,
                                (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) &
                                 ~IXGBE_DMATXCTL_TE));
+               break;
+       default:
+               break;
+       }
 
        /* power down the optics */
        if (hw->phy.multispeed_fiber)
@@ -4224,19 +4295,16 @@ static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter,
 static inline bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter)
 {
        int i;
-       bool ret = false;
 
-       if (adapter->flags & IXGBE_FLAG_RSS_ENABLED) {
-               for (i = 0; i < adapter->num_rx_queues; i++)
-                       adapter->rx_ring[i]->reg_idx = i;
-               for (i = 0; i < adapter->num_tx_queues; i++)
-                       adapter->tx_ring[i]->reg_idx = i;
-               ret = true;
-       } else {
-               ret = false;
-       }
+       if (!(adapter->flags & IXGBE_FLAG_RSS_ENABLED))
+               return false;
 
-       return ret;
+       for (i = 0; i < adapter->num_rx_queues; i++)
+               adapter->rx_ring[i]->reg_idx = i;
+       for (i = 0; i < adapter->num_tx_queues; i++)
+               adapter->tx_ring[i]->reg_idx = i;
+
+       return true;
 }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -4253,71 +4321,66 @@ static inline bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter)
        bool ret = false;
        int dcb_i = adapter->ring_feature[RING_F_DCB].indices;
 
-       if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
-               if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
-                       /* the number of queues is assumed to be symmetric */
-                       for (i = 0; i < dcb_i; i++) {
-                               adapter->rx_ring[i]->reg_idx = i << 3;
-                               adapter->tx_ring[i]->reg_idx = i << 2;
-                       }
-                       ret = true;
-               } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
-                       if (dcb_i == 8) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 32
-                                * Tx TC2 starts at: descriptor queue 64
-                                * Tx TC3 starts at: descriptor queue 80
-                                * Tx TC4 starts at: descriptor queue 96
-                                * Tx TC5 starts at: descriptor queue 104
-                                * Tx TC6 starts at: descriptor queue 112
-                                * Tx TC7 starts at: descriptor queue 120
-                                *
-                                * Rx TC0-TC7 are offset by 16 queues each
-                                */
-                               for (i = 0; i < 3; i++) {
-                                       adapter->tx_ring[i]->reg_idx = i << 5;
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < 5; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 2) << 4);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
-                               for ( ; i < dcb_i; i++) {
-                                       adapter->tx_ring[i]->reg_idx =
-                                                                ((i + 8) << 3);
-                                       adapter->rx_ring[i]->reg_idx = i << 4;
-                               }
+       if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED))
+               return false;
 
-                               ret = true;
-                       } else if (dcb_i == 4) {
-                               /*
-                                * Tx TC0 starts at: descriptor queue 0
-                                * Tx TC1 starts at: descriptor queue 64
-                                * Tx TC2 starts at: descriptor queue 96
-                                * Tx TC3 starts at: descriptor queue 112
-                                *
-                                * Rx TC0-TC3 are offset by 32 queues each
-                                */
-                               adapter->tx_ring[0]->reg_idx = 0;
-                               adapter->tx_ring[1]->reg_idx = 64;
-                               adapter->tx_ring[2]->reg_idx = 96;
-                               adapter->tx_ring[3]->reg_idx = 112;
-                               for (i = 0 ; i < dcb_i; i++)
-                                       adapter->rx_ring[i]->reg_idx = i << 5;
-
-                               ret = true;
-                       } else {
-                               ret = false;
+       /* the number of queues is assumed to be symmetric */
+       switch (adapter->hw.mac.type) {
+       case ixgbe_mac_82598EB:
+               for (i = 0; i < dcb_i; i++) {
+                       adapter->rx_ring[i]->reg_idx = i << 3;
+                       adapter->tx_ring[i]->reg_idx = i << 2;
+               }
+               ret = true;
+               break;
+       case ixgbe_mac_82599EB:
+               if (dcb_i == 8) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 32
+                        * Tx TC2 starts at: descriptor queue 64
+                        * Tx TC3 starts at: descriptor queue 80
+                        * Tx TC4 starts at: descriptor queue 96
+                        * Tx TC5 starts at: descriptor queue 104
+                        * Tx TC6 starts at: descriptor queue 112
+                        * Tx TC7 starts at: descriptor queue 120
+                        *
+                        * Rx TC0-TC7 are offset by 16 queues each
+                        */
+                       for (i = 0; i < 3; i++) {
+                               adapter->tx_ring[i]->reg_idx = i << 5;
+                               adapter->rx_ring[i]->reg_idx = i << 4;
                        }
-               } else {
-                       ret = false;
+                       for ( ; i < 5; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 2) << 4);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       for ( ; i < dcb_i; i++) {
+                               adapter->tx_ring[i]->reg_idx = ((i + 8) << 3);
+                               adapter->rx_ring[i]->reg_idx = i << 4;
+                       }
+                       ret = true;
+               } else if (dcb_i == 4) {
+                       /*
+                        * Tx TC0 starts at: descriptor queue 0
+                        * Tx TC1 starts at: descriptor queue 64
+                        * Tx TC2 starts at: descriptor queue 96
+                        * Tx TC3 starts at: descriptor queue 112
+                        *
+                        * Rx TC0-TC3 are offset by 32 queues each
+                        */
+                       adapter->tx_ring[0]->reg_idx = 0;
+                       adapter->tx_ring[1]->reg_idx = 64;
+                       adapter->tx_ring[2]->reg_idx = 96;
+                       adapter->tx_ring[3]->reg_idx = 112;
+                       for (i = 0 ; i < dcb_i; i++)
+                               adapter->rx_ring[i]->reg_idx = i << 5;
+                       ret = true;
                }
-       } else {
-               ret = false;
+               break;
+       default:
+               break;
        }
-
        return ret;
 }
 #endif
@@ -4878,11 +4941,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        adapter->ring_feature[RING_F_RSS].indices = rss;
        adapter->flags |= IXGBE_FLAG_RSS_ENABLED;
        adapter->ring_feature[RING_F_DCB].indices = IXGBE_MAX_DCB_INDICES;
-       if (hw->mac.type == ixgbe_mac_82598EB) {
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                if (hw->device_id == IXGBE_DEV_ID_82598AT)
                        adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE;
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82598;
-       } else if (hw->mac.type == ixgbe_mac_82599EB) {
+               break;
+       case ixgbe_mac_82599EB:
                adapter->max_msix_q_vectors = MAX_MSIX_Q_VECTORS_82599;
                adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE;
                adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED;
@@ -4911,6 +4976,9 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
                adapter->fcoe.up = IXGBE_FCOE_DEFTC;
 #endif
 #endif /* IXGBE_FCOE */
+               break;
+       default:
+               break;
        }
 
 #ifdef CONFIG_IXGBE_DCB
@@ -5393,10 +5461,16 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake)
                IXGBE_WRITE_REG(hw, IXGBE_WUFC, 0);
        }
 
-       if (wufc && hw->mac.type == ixgbe_mac_82599EB)
-               pci_wake_from_d3(pdev, true);
-       else
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
                pci_wake_from_d3(pdev, false);
+               break;
+       case ixgbe_mac_82599EB:
+               pci_wake_from_d3(pdev, !!wufc);
+               break;
+       default:
+               break;
+       }
 
        *enable_wake = !!wufc;
 
@@ -5515,17 +5589,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC(i));
                hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i));
                hwstats->qbrc[i] += IXGBE_READ_REG(hw, IXGBE_QBRC(i));
-               if (hw->mac.type == ixgbe_mac_82599EB) {
-                       hwstats->pxonrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
-                       hwstats->pxoffrxc[i] +=
-                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
-                       hwstats->qprdc[i] += IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
-               } else {
+               switch (hw->mac.type) {
+               case ixgbe_mac_82598EB:
                        hwstats->pxonrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXONRXC(i));
                        hwstats->pxoffrxc[i] +=
                                IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i));
+                       break;
+               case ixgbe_mac_82599EB:
+                       hwstats->pxonrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i));
+                       hwstats->pxoffrxc[i] +=
+                               IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i));
+                       break;
+               default:
+                       break;
                }
                hwstats->pxontxc[i] += IXGBE_READ_REG(hw, IXGBE_PXONTXC(i));
                hwstats->pxofftxc[i] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(i));
@@ -5535,18 +5613,21 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
        hwstats->gprc -= missed_rx;
 
        /* 82598 hardware only has a 32 bit counter in the high register */
-       if (hw->mac.type == ixgbe_mac_82599EB) {
-               u64 tmp;
+       switch (hw->mac.type) {
+       case ixgbe_mac_82598EB:
+               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
+               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
+               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
+               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
+               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       case ixgbe_mac_82599EB:
                hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GORCH) & 0xF;
-                                               /* 4 high bits of GORC */
-               hwstats->gorc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GORCH); /* to clear */
                hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCL);
-               tmp = IXGBE_READ_REG(hw, IXGBE_GOTCH) & 0xF;
-                                               /* 4 high bits of GOTC */
-               hwstats->gotc += (tmp << 32);
+               IXGBE_READ_REG(hw, IXGBE_GOTCH); /* to clear */
                hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL);
-               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
+               IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */
                hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT);
                hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXCNT);
                hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH);
@@ -5559,12 +5640,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC);
                hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC);
 #endif /* IXGBE_FCOE */
-       } else {
-               hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXC);
-               hwstats->lxoffrxc += IXGBE_READ_REG(hw, IXGBE_LXOFFRXC);
-               hwstats->gorc += IXGBE_READ_REG(hw, IXGBE_GORCH);
-               hwstats->gotc += IXGBE_READ_REG(hw, IXGBE_GOTCH);
-               hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH);
+               break;
+       default:
+               break;
        }
        bprc = IXGBE_READ_REG(hw, IXGBE_BPRC);
        hwstats->bprc += bprc;
@@ -5800,17 +5878,26 @@ static void ixgbe_watchdog_task(struct work_struct *work)
                if (!netif_carrier_ok(netdev)) {
                        bool flow_rx, flow_tx;
 
-                       if (hw->mac.type == ixgbe_mac_82599EB) {
-                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
-                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
-                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
-                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
-                       } else {
+                       switch (hw->mac.type) {
+                       case ixgbe_mac_82598EB: {
                                u32 frctl = IXGBE_READ_REG(hw, IXGBE_FCTRL);
                                u32 rmcs = IXGBE_READ_REG(hw, IXGBE_RMCS);
                                flow_rx = !!(frctl & IXGBE_FCTRL_RFCE);
                                flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X);
                        }
+                               break;
+                       case ixgbe_mac_82599EB: {
+                               u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN);
+                               u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG);
+                               flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE);
+                               flow_tx = !!(fccfg & IXGBE_FCCFG_TFCE_802_3X);
+                       }
+                               break;
+                       default:
+                               flow_tx = false;
+                               flow_rx = false;
+                               break;
+                       }
 
                        e_info(drv, "NIC Link is Up %s, Flow Control: %s\n",
                               (link_speed == IXGBE_LINK_SPEED_10GB_FULL ?
@@ -6228,47 +6315,34 @@ static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-                     int queue, u32 tx_flags, __be16 protocol)
+                     u8 queue, u32 tx_flags, __be16 protocol)
 {
        struct ixgbe_atr_input atr_input;
-       struct tcphdr *th;
        struct iphdr *iph = ip_hdr(skb);
        struct ethhdr *eth = (struct ethhdr *)skb->data;
-       u16 vlan_id, src_port, dst_port, flex_bytes;
-       u32 src_ipv4_addr, dst_ipv4_addr;
-       u8 l4type = 0;
+       struct tcphdr *th;
+       u16 vlan_id;
 
-       /* Right now, we support IPv4 only */
-       if (protocol != htons(ETH_P_IP))
+       /* Right now, we support IPv4 w/ TCP only */
+       if (protocol != htons(ETH_P_IP) ||
+           iph->protocol != IPPROTO_TCP)
                return;
-       /* check if we're UDP or TCP */
-       if (iph->protocol == IPPROTO_TCP) {
-               th = tcp_hdr(skb);
-               src_port = th->source;
-               dst_port = th->dest;
-               l4type |= IXGBE_ATR_L4TYPE_TCP;
-               /* l4type IPv4 type is 0, no need to assign */
-       } else {
-               /* Unsupported L4 header, just bail here */
-               return;
-       }
 
        memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
 
        vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
                   IXGBE_TX_FLAGS_VLAN_SHIFT;
-       src_ipv4_addr = iph->saddr;
-       dst_ipv4_addr = iph->daddr;
-       flex_bytes = eth->h_proto;
+
+       th = tcp_hdr(skb);
 
        ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
-       ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
-       ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
-       ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
-       ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+       ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+       ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+       ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+       ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
        /* src and dst are inverted, think how the receiver sees them */
-       ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
-       ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+       ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+       ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
 
        /* This assumes the Rx queue and Tx queue are bound to the same CPU */
        ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
@@ -7011,6 +7085,13 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev,
                goto err_sw_init;
 
        switch (pdev->device) {
+       case IXGBE_DEV_ID_82599_COMBO_BACKPLANE:
+               /* All except this subdevice support WOL */
+               if (pdev->subsystem_device ==
+                   IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) {
+                       adapter->wol = 0;
+                       break;
+               }
        case IXGBE_DEV_ID_82599_KX4:
                adapter->wol = (IXGBE_WUFC_MAG | IXGBE_WUFC_EX |
                                IXGBE_WUFC_MC | IXGBE_WUFC_BC);