ixgbe: combine some stats into a union to allow for Tx/Rx stats overlap
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a137f9dbaacd7ee90e410bdf246acedbb38faa7c..a47e0909816617c6f3118ddc233cf42590e893d2 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
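
The subject line refers to per-ring counters that previously sat as loose fields on struct ixgbe_ring (restart_queue, rsc_count, non_eop_descs, and so on). A ring is either Tx or Rx, never both, so the Tx-only and Rx-only counters can share storage. A sketch of the resulting layout, using only field names visible in the hunks below; the actual struct change lives in ixgbe.h and is not part of this blobdiff:

	struct ixgbe_queue_stats {
		u64 packets;
		u64 bytes;
	};

	struct ixgbe_tx_queue_stats {
		u64 restart_queue;
		u64 tx_busy;
	};

	struct ixgbe_rx_queue_stats {
		u64 rsc_count;
		u64 rsc_flush;
		u64 non_eop_descs;
		u64 alloc_rx_page_failed;
		u64 alloc_rx_buff_failed;
	};

	/* inside struct ixgbe_ring */
	struct ixgbe_queue_stats stats;	/* packets/bytes, common to Tx and Rx */
	struct u64_stats_sync syncp;
	union {				/* the Tx/Rx overlap from the subject line */
		struct ixgbe_tx_queue_stats tx_stats;
		struct ixgbe_rx_queue_stats rx_stats;
	};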
@@ -600,18 +600,17 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter,
        }
 }
 
-void ixgbe_unmap_and_free_tx_resource(struct ixgbe_adapter *adapter,
-                                     struct ixgbe_tx_buffer
-                                     *tx_buffer_info)
+void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *tx_ring,
+                                     struct ixgbe_tx_buffer *tx_buffer_info)
 {
        if (tx_buffer_info->dma) {
                if (tx_buffer_info->mapped_as_page)
-                       dma_unmap_page(&adapter->pdev->dev,
+                       dma_unmap_page(tx_ring->dev,
                                       tx_buffer_info->dma,
                                       tx_buffer_info->length,
                                       DMA_TO_DEVICE);
                else
-                       dma_unmap_single(&adapter->pdev->dev,
+                       dma_unmap_single(tx_ring->dev,
                                         tx_buffer_info->dma,
                                         tx_buffer_info->length,
                                         DMA_TO_DEVICE);
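
This signature change is the recurring theme of the diff: helpers that used to take the adapter now take the ring, and the ring carries what they need. Each ring records its DMA device once at allocation time (see the ixgbe_alloc_queues() hunk further down):

	ring->dev = &adapter->pdev->dev;	/* set once in ixgbe_alloc_queues() */

after which hot paths such as the unmap above can reach the device as tx_ring->dev without an adapter back-pointer.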
@@ -704,8 +703,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                      "  time_stamp           <%lx>\n"
                      "  jiffies              <%lx>\n",
                      tx_ring->queue_index,
-                     IXGBE_READ_REG(hw, tx_ring->head),
-                     IXGBE_READ_REG(hw, tx_ring->tail),
+                     IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+                     IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
                      tx_ring->next_to_use, eop,
                      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
@@ -749,45 +748,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                for ( ; !cleaned; count++) {
-                       struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
-                       cleaned = (i == eop);
-                       skb = tx_buffer_info->skb;
-
-                       if (cleaned && skb) {
-                               unsigned int segs, bytecount;
-                               unsigned int hlen = skb_headlen(skb);
-
-                               /* gso_segs is currently only valid for tcp */
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
-                               /* adjust for FCoE Sequence Offload */
-                               if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                                   && skb_is_gso(skb)
-                                   && vlan_get_protocol(skb) ==
-                                   htons(ETH_P_FCOE)) {
-                                       hlen = skb_transport_offset(skb) +
-                                               sizeof(struct fc_frame_header) +
-                                               sizeof(struct fcoe_crc_eof);
-                                       segs = DIV_ROUND_UP(skb->len - hlen,
-                                               skb_shinfo(skb)->gso_size);
-                               }
-#endif /* IXGBE_FCOE */
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * hlen) + skb->len;
-                               total_packets += segs;
-                               total_bytes += bytecount;
-                       }
-
-                       ixgbe_unmap_and_free_tx_resource(adapter,
-                                                        tx_buffer_info);
 
                        tx_desc->wb.status = 0;
+                       cleaned = (i == eop);
 
                        i++;
                        if (i == tx_ring->count)
                                i = 0;
+
+                       if (cleaned && tx_buffer_info->skb) {
+                               total_bytes += tx_buffer_info->bytecount;
+                               total_packets += tx_buffer_info->gso_segs;
+                       }
+
+                       ixgbe_unmap_and_free_tx_resource(tx_ring,
+                                                        tx_buffer_info);
                }
 
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
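
The per-skb byte and segment accounting deleted here is not lost; it moves to ixgbe_tx_map() (the @@ -6088 hunk below), which computes the totals once at map time and stores them on the buffer_info, so the cleanup loop only accumulates. The two sides, quoted from this diff:

	/* map time, ixgbe_tx_map(): compute once per skb */
	tx_ring->tx_buffer_info[i].bytecount = bytecount;
	tx_ring->tx_buffer_info[i].gso_segs = gso_segs;

	/* clean time, ixgbe_clean_tx_irq(): just accumulate */
	if (cleaned && tx_buffer_info->skb) {
		total_bytes += tx_buffer_info->bytecount;
		total_packets += tx_buffer_info->gso_segs;
	}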
@@ -806,7 +783,7 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
                    !test_bit(__IXGBE_DOWN, &adapter->state)) {
                        netif_wake_subqueue(netdev, tx_ring->queue_index);
-                       ++tx_ring->restart_queue;
+                       ++tx_ring->tx_stats.restart_queue;
                }
        }
 
@@ -1013,8 +990,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-                                        struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 {
        /*
         * Force memory writes to complete before letting h/w
@@ -1023,7 +999,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
         * such as IA-64).
         */
        wmb();
-       IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+       writel(val, rx_ring->tail);
 }
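
A plain writel() works here because the ring now caches the ioremapped address of its tail register rather than the register offset. The address is computed once in ixgbe_configure_tx_ring()/ixgbe_configure_rx_ring() (hunks below):

	/* at configuration time, e.g. for an Rx ring: */
	ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);

	/* hot path: no hw or adapter pointer needed */
	wmb();			/* descriptor writes visible before the doorbell */
	writel(val, ring->tail);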
 
 /**
@@ -1032,63 +1008,69 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
  **/
 void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *rx_ring,
-                           int cleaned_count)
+                           u16 cleaned_count)
 {
-       struct net_device *netdev = adapter->netdev;
-       struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
-       unsigned int i;
-       unsigned int bufsz = rx_ring->rx_buf_len;
-
-       i = rx_ring->next_to_use;
-       bi = &rx_ring->rx_buffer_info[i];
+       struct sk_buff *skb;
+       u16 i = rx_ring->next_to_use;
 
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+               bi = &rx_ring->rx_buffer_info[i];
+               skb = bi->skb;
 
-               if (!bi->page_dma &&
-                   (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
-                       if (!bi->page) {
-                               bi->page = netdev_alloc_page(netdev);
-                               if (!bi->page) {
-                                       adapter->alloc_rx_page_failed++;
-                                       goto no_buffers;
-                               }
-                               bi->page_offset = 0;
-                       } else {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= (PAGE_SIZE / 2);
-                       }
-
-                       bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-                                                   bi->page_offset,
-                                                   (PAGE_SIZE / 2),
-                                                   DMA_FROM_DEVICE);
-               }
-
-               if (!bi->skb) {
-                       struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
-                                                                       bufsz);
-                       bi->skb = skb;
-
+               if (!skb) {
+                       skb = netdev_alloc_skb_ip_align(adapter->netdev,
+                                                       rx_ring->rx_buf_len);
                        if (!skb) {
-                               adapter->alloc_rx_buff_failed++;
+                               rx_ring->rx_stats.alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
+                       bi->skb = skb;
                }
 
                if (!bi->dma) {
-                       bi->dma = dma_map_single(&pdev->dev,
-                                                bi->skb->data,
+                       bi->dma = dma_map_single(rx_ring->dev,
+                                                skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(rx_ring->dev, bi->dma)) {
+                               rx_ring->rx_stats.alloc_rx_buff_failed++;
+                               bi->dma = 0;
+                               goto no_buffers;
+                       }
                }
-               /* Refresh the desc even if buffer_addrs didn't change because
-                * each write-back erases this info. */
+
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+                       if (!bi->page) {
+                               bi->page = netdev_alloc_page(adapter->netdev);
+                               if (!bi->page) {
+                                       rx_ring->rx_stats.alloc_rx_page_failed++;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       if (!bi->page_dma) {
+                               /* use a half page if we're re-using */
+                               bi->page_offset ^= PAGE_SIZE / 2;
+                               bi->page_dma = dma_map_page(rx_ring->dev,
+                                                           bi->page,
+                                                           bi->page_offset,
+                                                           PAGE_SIZE / 2,
+                                                           DMA_FROM_DEVICE);
+                               if (dma_mapping_error(rx_ring->dev,
+                                                     bi->page_dma)) {
+                                       rx_ring->rx_stats.alloc_rx_page_failed++;
+                                       bi->page_dma = 0;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       /* Refresh the desc even if buffer_addrs didn't change
+                        * because each write-back erases this info. */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
@@ -1099,16 +1081,12 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                i++;
                if (i == rx_ring->count)
                        i = 0;
-               bi = &rx_ring->rx_buffer_info[i];
        }
 
 no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
-
-               ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+               ixgbe_release_rx_desc(rx_ring, i);
        }
 }
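
Two details of the rewritten refill loop deserve a note. Every DMA mapping is now checked with dma_mapping_error(); on failure the stats counter is bumped, the stale handle is cleared so the next refill retries the buffer, and the loop bails out rather than handing the hardware an unmapped address. The half-page reuse also becomes clearer: each receive page is split into two halves, and XOR-ing page_offset with PAGE_SIZE/2 flips between them when a page is reused:

	bi->page_offset ^= PAGE_SIZE / 2;	/* flip to the other half */
	bi->page_dma = dma_map_page(rx_ring->dev, bi->page,
				    bi->page_offset, PAGE_SIZE / 2,
				    DMA_FROM_DEVICE);
	if (dma_mapping_error(rx_ring->dev, bi->page_dma)) {
		rx_ring->rx_stats.alloc_rx_page_failed++;
		bi->page_dma = 0;	/* retry on the next refill pass */
		goto no_buffers;
	}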
 
@@ -1171,7 +1149,6 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                               int *work_done, int work_to_do)
 {
        struct ixgbe_adapter *adapter = q_vector->adapter;
-       struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
        struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
        struct sk_buff *skb;
@@ -1228,7 +1205,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                IXGBE_RSC_CB(skb)->delay_unmap = true;
                                IXGBE_RSC_CB(skb)->dma = rx_buffer_info->dma;
                        } else {
-                               dma_unmap_single(&pdev->dev,
+                               dma_unmap_single(rx_ring->dev,
                                                 rx_buffer_info->dma,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
@@ -1238,8 +1215,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                }
 
                if (upper_len) {
-                       dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
-                                      PAGE_SIZE / 2, DMA_FROM_DEVICE);
+                       dma_unmap_page(rx_ring->dev,
+                                      rx_buffer_info->page_dma,
+                                      PAGE_SIZE / 2,
+                                      DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                        skb_fill_page_desc(skb, skb_shinfo(skb)->nr_frags,
                                           rx_buffer_info->page,
@@ -1279,10 +1258,10 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                if (staterr & IXGBE_RXD_STAT_EOP) {
                        if (skb->prev)
                                skb = ixgbe_transform_rsc_queue(skb,
-                                                               &(rx_ring->rsc_count));
+                                               &(rx_ring->rx_stats.rsc_count));
                        if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
                                if (IXGBE_RSC_CB(skb)->delay_unmap) {
-                                       dma_unmap_single(&pdev->dev,
+                                       dma_unmap_single(rx_ring->dev,
                                                         IXGBE_RSC_CB(skb)->dma,
                                                         rx_ring->rx_buf_len,
                                                         DMA_FROM_DEVICE);
@@ -1290,11 +1269,11 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                        IXGBE_RSC_CB(skb)->delay_unmap = false;
                                }
                                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
-                                       rx_ring->rsc_count +=
-                                               skb_shinfo(skb)->nr_frags;
+                                       rx_ring->rx_stats.rsc_count +=
+                                                skb_shinfo(skb)->nr_frags;
                                else
-                                       rx_ring->rsc_count++;
-                               rx_ring->rsc_flush++;
+                                       rx_ring->rx_stats.rsc_count++;
+                               rx_ring->rx_stats.rsc_flush++;
                        }
                        u64_stats_update_begin(&rx_ring->syncp);
                        rx_ring->stats.packets++;
@@ -1310,7 +1289,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
                                skb->next = next_buffer->skb;
                                skb->next->prev = skb;
                        }
-                       rx_ring->non_eop_descs++;
+                       rx_ring->rx_stats.non_eop_descs++;
                        goto next_desc;
                }
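
The packets/bytes pair is updated under u64_stats_update_begin()/u64_stats_update_end() (a few lines up) so that 32-bit readers can observe torn-free 64-bit values. A sketch of the matching reader side, assuming the syncp field this series adds to the ring:

	unsigned int start;
	u64 packets, bytes;

	do {
		start = u64_stats_fetch_begin(&ring->syncp);
		packets = ring->stats.packets;
		bytes = ring->stats.bytes;
	} while (u64_stats_fetch_retry(&ring->syncp, start));

On 64-bit kernels the fetch_begin/fetch_retry pair compiles away to nothing.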
 
@@ -2484,8 +2463,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_tx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
-       ring->head = IXGBE_TDH(reg_idx);
-       ring->tail = IXGBE_TDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
        /* configure fetching thresholds */
        if (adapter->rx_itr_setting == 0) {
@@ -2810,8 +2788,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
-       ring->head = IXGBE_RDH(reg_idx);
-       ring->tail = IXGBE_RDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
 
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
@@ -3366,9 +3343,9 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
 
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_TX_CONFIG);
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_RX_CONFIG);
 
        /* reconfigure the hardware */
@@ -3687,15 +3664,13 @@ void ixgbe_reset(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_clean_rx_ring - Free Rx Buffers per Queue
- * @adapter: board private structure
  * @rx_ring: ring to free buffers from
  **/
-static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *rx_ring)
+static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = rx_ring->dev;
        unsigned long size;
-       unsigned int i;
+       u16 i;
 
        /* ring already cleared, nothing to do */
        if (!rx_ring->rx_buffer_info)
@@ -3707,7 +3682,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
                rx_buffer_info = &rx_ring->rx_buffer_info[i];
                if (rx_buffer_info->dma) {
-                       dma_unmap_single(&pdev->dev, rx_buffer_info->dma,
+                       dma_unmap_single(rx_ring->dev, rx_buffer_info->dma,
                                         rx_ring->rx_buf_len,
                                         DMA_FROM_DEVICE);
                        rx_buffer_info->dma = 0;
@@ -3718,7 +3693,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                        do {
                                struct sk_buff *this = skb;
                                if (IXGBE_RSC_CB(this)->delay_unmap) {
-                                       dma_unmap_single(&pdev->dev,
+                                       dma_unmap_single(dev,
                                                         IXGBE_RSC_CB(this)->dma,
                                                         rx_ring->rx_buf_len,
                                                         DMA_FROM_DEVICE);
@@ -3732,7 +3707,7 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
                if (!rx_buffer_info->page)
                        continue;
                if (rx_buffer_info->page_dma) {
-                       dma_unmap_page(&pdev->dev, rx_buffer_info->page_dma,
+                       dma_unmap_page(dev, rx_buffer_info->page_dma,
                                       PAGE_SIZE / 2, DMA_FROM_DEVICE);
                        rx_buffer_info->page_dma = 0;
                }
@@ -3749,24 +3724,17 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-
-       if (rx_ring->head)
-               writel(0, adapter->hw.hw_addr + rx_ring->head);
-       if (rx_ring->tail)
-               writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
  * ixgbe_clean_tx_ring - Free Tx Buffers
- * @adapter: board private structure
  * @tx_ring: ring to be cleaned
  **/
-static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
-                               struct ixgbe_ring *tx_ring)
+static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring)
 {
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned long size;
-       unsigned int i;
+       u16 i;
 
        /* ring already cleared, nothing to do */
        if (!tx_ring->tx_buffer_info)
@@ -3775,7 +3743,7 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
        /* Free all the Tx ring sk_buffs */
        for (i = 0; i < tx_ring->count; i++) {
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -3786,11 +3754,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       if (tx_ring->head)
-               writel(0, adapter->hw.hw_addr + tx_ring->head);
-       if (tx_ring->tail)
-               writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -3802,7 +3765,7 @@ static void ixgbe_clean_all_rx_rings(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_rx_queues; i++)
-               ixgbe_clean_rx_ring(adapter, adapter->rx_ring[i]);
+               ixgbe_clean_rx_ring(adapter->rx_ring[i]);
 }
 
 /**
@@ -3814,7 +3777,7 @@ static void ixgbe_clean_all_tx_rings(struct ixgbe_adapter *adapter)
        int i;
 
        for (i = 0; i < adapter->num_tx_queues; i++)
-               ixgbe_clean_tx_ring(adapter, adapter->tx_ring[i]);
+               ixgbe_clean_tx_ring(adapter->tx_ring[i]);
 }
 
 void ixgbe_down(struct ixgbe_adapter *adapter)
@@ -4472,6 +4435,7 @@ static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter)
 static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
 {
        int i;
+       int rx_count;
        int orig_node = adapter->node;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
@@ -4490,6 +4454,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
                        goto err_tx_ring_allocation;
                ring->count = adapter->tx_ring_count;
                ring->queue_index = i;
+               ring->dev = &adapter->pdev->dev;
                ring->numa_node = adapter->node;
 
                adapter->tx_ring[i] = ring;
@@ -4498,6 +4463,7 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
        /* Restore the adapter's original node */
        adapter->node = orig_node;
 
+       rx_count = adapter->rx_ring_count;
        for (i = 0; i < adapter->num_rx_queues; i++) {
                struct ixgbe_ring *ring = adapter->rx_ring[i];
                if (orig_node == -1) {
@@ -4512,8 +4478,9 @@ static int ixgbe_alloc_queues(struct ixgbe_adapter *adapter)
                        ring = kzalloc(sizeof(struct ixgbe_ring), GFP_KERNEL);
                if (!ring)
                        goto err_rx_ring_allocation;
-               ring->count = adapter->rx_ring_count;
+               ring->count = rx_count;
                ring->queue_index = i;
+               ring->dev = &adapter->pdev->dev;
                ring->numa_node = adapter->node;
 
                adapter->rx_ring[i] = ring;
@@ -4854,6 +4821,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        int j;
        struct tc_configuration *tc;
 #endif
+       int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* PCI config space info */
 
@@ -4930,8 +4898,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_DCB
        adapter->last_lfc_mode = hw->fc.current_mode;
 #endif
-       hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
-       hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
        hw->fc.disable_fc_autoneg = false;
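
FC_HIGH_WATER() and FC_LOW_WATER() are macros defined outside this blobdiff (presumably in ixgbe.h); the change is that the pause-frame thresholds now scale with the maximum frame size instead of using fixed defaults. The same recalculation reappears in ixgbe_change_mtu() (hunk below) so the watermarks track MTU changes:

	int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;

	hw->fc.high_water = FC_HIGH_WATER(max_frame);
	hw->fc.low_water = FC_LOW_WATER(max_frame);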
@@ -4969,15 +4937,13 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_tx_resources - allocate Tx resources (Descriptors)
- * @adapter: board private structure
  * @tx_ring:    tx descriptor ring (for a specific queue) to setup
  *
  * Return 0 on success, negative on failure
  **/
-int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = tx_ring->dev;
        int size;
 
        size = sizeof(struct ixgbe_tx_buffer) * tx_ring->count;
@@ -4992,7 +4958,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
        tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc);
        tx_ring->size = ALIGN(tx_ring->size, 4096);
 
-       tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+       tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
                                           &tx_ring->dma, GFP_KERNEL);
        if (!tx_ring->desc)
                goto err;
@@ -5005,7 +4971,7 @@ int ixgbe_setup_tx_resources(struct ixgbe_adapter *adapter,
 err:
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
-       e_err(probe, "Unable to allocate memory for the Tx descriptor ring\n");
+       dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -5024,7 +4990,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
        int i, err = 0;
 
        for (i = 0; i < adapter->num_tx_queues; i++) {
-               err = ixgbe_setup_tx_resources(adapter, adapter->tx_ring[i]);
+               err = ixgbe_setup_tx_resources(adapter->tx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Tx Queue %u failed\n", i);
@@ -5036,48 +5002,41 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_setup_rx_resources - allocate Rx resources (Descriptors)
- * @adapter: board private structure
  * @rx_ring:    rx descriptor ring (for a specific queue) to setup
  *
  * Returns 0 on success, negative on failure
  **/
-int ixgbe_setup_rx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = rx_ring->dev;
        int size;
 
        size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count;
-       rx_ring->rx_buffer_info = vmalloc_node(size, adapter->node);
+       rx_ring->rx_buffer_info = vmalloc_node(size, rx_ring->numa_node);
        if (!rx_ring->rx_buffer_info)
                rx_ring->rx_buffer_info = vmalloc(size);
-       if (!rx_ring->rx_buffer_info) {
-               e_err(probe, "vmalloc allocation failed for the Rx "
-                     "descriptor ring\n");
-               goto alloc_failed;
-       }
+       if (!rx_ring->rx_buffer_info)
+               goto err;
        memset(rx_ring->rx_buffer_info, 0, size);
 
        /* Round up to nearest 4K */
        rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc);
        rx_ring->size = ALIGN(rx_ring->size, 4096);
 
-       rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+       rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
                                           &rx_ring->dma, GFP_KERNEL);
 
-       if (!rx_ring->desc) {
-               e_err(probe, "Memory allocation failed for the Rx "
-                     "descriptor ring\n");
-               vfree(rx_ring->rx_buffer_info);
-               goto alloc_failed;
-       }
+       if (!rx_ring->desc)
+               goto err;
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
 
        return 0;
-
-alloc_failed:
+err:
+       vfree(rx_ring->rx_buffer_info);
+       rx_ring->rx_buffer_info = NULL;
+       dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
        return -ENOMEM;
 }
 
@@ -5091,13 +5050,12 @@ alloc_failed:
  *
  * Return 0 on success, negative on failure
  **/
-
 static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 {
        int i, err = 0;
 
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               err = ixgbe_setup_rx_resources(adapter, adapter->rx_ring[i]);
+               err = ixgbe_setup_rx_resources(adapter->rx_ring[i]);
                if (!err)
                        continue;
                e_err(probe, "Allocation for Rx Queue %u failed\n", i);
@@ -5109,23 +5067,23 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter)
 
 /**
  * ixgbe_free_tx_resources - Free Tx Resources per Queue
- * @adapter: board private structure
  * @tx_ring: Tx descriptor ring for a specific queue
  *
  * Free all transmit software resources
  **/
-void ixgbe_free_tx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *tx_ring)
+void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
-
-       ixgbe_clean_tx_ring(adapter, tx_ring);
+       ixgbe_clean_tx_ring(tx_ring);
 
        vfree(tx_ring->tx_buffer_info);
        tx_ring->tx_buffer_info = NULL;
 
-       dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
-                         tx_ring->dma);
+       /* if not set, then don't free */
+       if (!tx_ring->desc)
+               return;
+
+       dma_free_coherent(tx_ring->dev, tx_ring->size,
+                         tx_ring->desc, tx_ring->dma);
 
        tx_ring->desc = NULL;
 }
@@ -5142,28 +5100,28 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter)
 
        for (i = 0; i < adapter->num_tx_queues; i++)
                if (adapter->tx_ring[i]->desc)
-                       ixgbe_free_tx_resources(adapter, adapter->tx_ring[i]);
+                       ixgbe_free_tx_resources(adapter->tx_ring[i]);
 }
 
 /**
  * ixgbe_free_rx_resources - Free Rx Resources
- * @adapter: board private structure
  * @rx_ring: ring to clean the resources from
  *
  * Free all receive software resources
  **/
-void ixgbe_free_rx_resources(struct ixgbe_adapter *adapter,
-                            struct ixgbe_ring *rx_ring)
+void ixgbe_free_rx_resources(struct ixgbe_ring *rx_ring)
 {
-       struct pci_dev *pdev = adapter->pdev;
-
-       ixgbe_clean_rx_ring(adapter, rx_ring);
+       ixgbe_clean_rx_ring(rx_ring);
 
        vfree(rx_ring->rx_buffer_info);
        rx_ring->rx_buffer_info = NULL;
 
-       dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
-                         rx_ring->dma);
+       /* if not set, then don't free */
+       if (!rx_ring->desc)
+               return;
+
+       dma_free_coherent(rx_ring->dev, rx_ring->size,
+                         rx_ring->desc, rx_ring->dma);
 
        rx_ring->desc = NULL;
 }
@@ -5180,7 +5138,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 
        for (i = 0; i < adapter->num_rx_queues; i++)
                if (adapter->rx_ring[i]->desc)
-                       ixgbe_free_rx_resources(adapter, adapter->rx_ring[i]);
+                       ixgbe_free_rx_resources(adapter->rx_ring[i]);
 }
 
 /**
@@ -5193,6 +5151,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* MTU < 68 is an error and causes problems on some kernels */
@@ -5203,6 +5162,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
+
        if (netif_running(netdev))
                ixgbe_reinit_locked(adapter);
 
@@ -5444,10 +5406,12 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
 {
        struct net_device *netdev = adapter->netdev;
        struct ixgbe_hw *hw = &adapter->hw;
+       struct ixgbe_hw_stats *hwstats = &adapter->stats;
        u64 total_mpc = 0;
        u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
-       u64 non_eop_descs = 0, restart_queue = 0;
-       struct ixgbe_hw_stats *hwstats = &adapter->stats;
+       u64 non_eop_descs = 0, restart_queue = 0, tx_busy = 0;
+       u64 alloc_rx_page_failed = 0, alloc_rx_buff_failed = 0;
+       u64 bytes = 0, packets = 0;
 
        if (test_bit(__IXGBE_DOWN, &adapter->state) ||
            test_bit(__IXGBE_RESETTING, &adapter->state))
@@ -5460,21 +5424,41 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
                        adapter->hw_rx_no_dma_resources +=
                                IXGBE_READ_REG(hw, IXGBE_QPRDC(i));
                for (i = 0; i < adapter->num_rx_queues; i++) {
-                       rsc_count += adapter->rx_ring[i]->rsc_count;
-                       rsc_flush += adapter->rx_ring[i]->rsc_flush;
+                       rsc_count += adapter->rx_ring[i]->rx_stats.rsc_count;
+                       rsc_flush += adapter->rx_ring[i]->rx_stats.rsc_flush;
                }
                adapter->rsc_total_count = rsc_count;
                adapter->rsc_total_flush = rsc_flush;
        }
 
+       for (i = 0; i < adapter->num_rx_queues; i++) {
+               struct ixgbe_ring *rx_ring = adapter->rx_ring[i];
+               non_eop_descs += rx_ring->rx_stats.non_eop_descs;
+               alloc_rx_page_failed += rx_ring->rx_stats.alloc_rx_page_failed;
+               alloc_rx_buff_failed += rx_ring->rx_stats.alloc_rx_buff_failed;
+               bytes += rx_ring->stats.bytes;
+               packets += rx_ring->stats.packets;
+       }
+       adapter->non_eop_descs = non_eop_descs;
+       adapter->alloc_rx_page_failed = alloc_rx_page_failed;
+       adapter->alloc_rx_buff_failed = alloc_rx_buff_failed;
+       netdev->stats.rx_bytes = bytes;
+       netdev->stats.rx_packets = packets;
+
+       bytes = 0;
+       packets = 0;
        /* gather some stats to the adapter struct that are per queue */
-       for (i = 0; i < adapter->num_tx_queues; i++)
-               restart_queue += adapter->tx_ring[i]->restart_queue;
+       for (i = 0; i < adapter->num_tx_queues; i++) {
+               struct ixgbe_ring *tx_ring = adapter->tx_ring[i];
+               restart_queue += tx_ring->tx_stats.restart_queue;
+               tx_busy += tx_ring->tx_stats.tx_busy;
+               bytes += tx_ring->stats.bytes;
+               packets += tx_ring->stats.packets;
+       }
        adapter->restart_queue = restart_queue;
-
-       for (i = 0; i < adapter->num_rx_queues; i++)
-               non_eop_descs += adapter->rx_ring[i]->non_eop_descs;
-       adapter->non_eop_descs = non_eop_descs;
+       adapter->tx_busy = tx_busy;
+       netdev->stats.tx_bytes = bytes;
+       netdev->stats.tx_packets = packets;
 
        hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS);
        for (i = 0; i < 8; i++) {
@@ -6010,15 +5994,17 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
-                       unsigned int first)
+                       unsigned int first, const u8 hdr_len)
 {
-       struct pci_dev *pdev = adapter->pdev;
+       struct device *dev = tx_ring->dev;
        struct ixgbe_tx_buffer *tx_buffer_info;
        unsigned int len;
        unsigned int total = skb->len;
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
+       unsigned int bytecount = skb->len;
+       u16 gso_segs = 1;
 
        i = tx_ring->next_to_use;
 
@@ -6033,10 +6019,10 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
 
                tx_buffer_info->length = size;
                tx_buffer_info->mapped_as_page = false;
-               tx_buffer_info->dma = dma_map_single(&pdev->dev,
+               tx_buffer_info->dma = dma_map_single(dev,
                                                     skb->data + offset,
                                                     size, DMA_TO_DEVICE);
-               if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+               if (dma_mapping_error(dev, tx_buffer_info->dma))
                        goto dma_error;
                tx_buffer_info->time_stamp = jiffies;
                tx_buffer_info->next_to_watch = i;
@@ -6069,12 +6055,12 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        size = min(len, (uint)IXGBE_MAX_DATA_PER_TXD);
 
                        tx_buffer_info->length = size;
-                       tx_buffer_info->dma = dma_map_page(&adapter->pdev->dev,
+                       tx_buffer_info->dma = dma_map_page(dev,
                                                           frag->page,
                                                           offset, size,
                                                           DMA_TO_DEVICE);
                        tx_buffer_info->mapped_as_page = true;
-                       if (dma_mapping_error(&pdev->dev, tx_buffer_info->dma))
+                       if (dma_mapping_error(dev, tx_buffer_info->dma))
                                goto dma_error;
                        tx_buffer_info->time_stamp = jiffies;
                        tx_buffer_info->next_to_watch = i;
@@ -6088,6 +6074,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        break;
        }
 
+       if (tx_flags & IXGBE_TX_FLAGS_TSO)
+               gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+       /* adjust for FCoE Sequence Offload */
+       else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+               gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+                                       skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+       bytecount += (gso_segs - 1) * hdr_len;
+
+       /* multiply data chunks by size of headers */
+       tx_ring->tx_buffer_info[i].bytecount = bytecount;
+       tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
        tx_ring->tx_buffer_info[i].skb = skb;
        tx_ring->tx_buffer_info[first].next_to_watch = i;
 
@@ -6109,14 +6108,13 @@ dma_error:
                        i += tx_ring->count;
                i--;
                tx_buffer_info = &tx_ring->tx_buffer_info[i];
-               ixgbe_unmap_and_free_tx_resource(adapter, tx_buffer_info);
+               ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
        }
 
        return 0;
 }
 
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
-                          struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6181,7 +6179,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
+       writel(i, tx_ring->tail);
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
@@ -6247,7 +6245,7 @@ static int __ixgbe_maybe_stop_tx(struct net_device *netdev,
 
        /* A reprieve! - use start_queue because it doesn't call schedule */
        netif_start_subqueue(netdev, tx_ring->queue_index);
-       ++tx_ring->restart_queue;
+       ++tx_ring->tx_stats.restart_queue;
        return 0;
 }
 
@@ -6363,7 +6361,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
 
        if (ixgbe_maybe_stop_tx(netdev, tx_ring, count)) {
-               adapter->tx_busy++;
+               tx_ring->tx_stats.tx_busy++;
                return NETDEV_TX_BUSY;
        }
 
@@ -6397,7 +6395,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                        tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
 
-       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
        if (count) {
                /* add the ATR filter if ATR is on */
                if (tx_ring->atr_sample_rate) {
@@ -6413,8 +6411,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
                txq->tx_bytes += skb->len;
                txq->tx_packets++;
-               ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
-                              hdr_len);
+               ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
                ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
        } else {