ixgbe: drop ring->head, make ring->tail a pointer instead of offset
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index 2bd3eb4ee5a1996d9c219c90419c0b9a455c0b5b..8f2afaa35dd92e60cd772eb59dc3e52fc19dba13 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -704,8 +704,8 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_adapter *adapter,
                      "  time_stamp           <%lx>\n"
                      "  jiffies              <%lx>\n",
                      tx_ring->queue_index,
-                     IXGBE_READ_REG(hw, tx_ring->head),
-                     IXGBE_READ_REG(hw, tx_ring->tail),
+                     IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+                     IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
                      tx_ring->next_to_use, eop,
                      tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
                return true;
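
With ring->head dropped from the ring struct, this rare hang-report path
derives the head/tail register offsets from the ring's hardware queue index
on the spot. A minimal sketch of what the macros expand to, using the
offsets from ixgbe_type.h (illustrative; check the header for your hardware
generation):

    /* per-queue TX head/tail registers, spaced 0x40 apart */
    #define IXGBE_TDH(_i)   (0x06010 + ((_i) * 0x40))
    #define IXGBE_TDT(_i)   (0x06018 + ((_i) * 0x40))

    /* slow path only: each IXGBE_READ_REG() is an uncached MMIO read,
     * which is why the fast paths keep a precomputed tail pointer and
     * never read TDH at all.
     */
    u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx));
    u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx));
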
@@ -749,44 +749,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
                bool cleaned = false;
                rmb(); /* read buffer_info after eop_desc */
                for ( ; !cleaned; count++) {
-                       struct sk_buff *skb;
                        tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
                        tx_buffer_info = &tx_ring->tx_buffer_info[i];
+
+                       tx_desc->wb.status = 0;
                        cleaned = (i == eop);
-                       skb = tx_buffer_info->skb;
 
-                       if (cleaned && skb) {
-                               unsigned int segs, bytecount;
-                               unsigned int hlen = skb_headlen(skb);
+                       i++;
+                       if (i == tx_ring->count)
+                               i = 0;
 
-                               /* gso_segs is currently only valid for tcp */
-                               segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
-                               /* adjust for FCoE Sequence Offload */
-                               if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                                   && (skb->protocol == htons(ETH_P_FCOE)) &&
-                                   skb_is_gso(skb)) {
-                                       hlen = skb_transport_offset(skb) +
-                                               sizeof(struct fc_frame_header) +
-                                               sizeof(struct fcoe_crc_eof);
-                                       segs = DIV_ROUND_UP(skb->len - hlen,
-                                               skb_shinfo(skb)->gso_size);
-                               }
-#endif /* IXGBE_FCOE */
-                               /* multiply data chunks by size of headers */
-                               bytecount = ((segs - 1) * hlen) + skb->len;
-                               total_packets += segs;
-                               total_bytes += bytecount;
+                       if (cleaned && tx_buffer_info->skb) {
+                               total_bytes += tx_buffer_info->bytecount;
+                               total_packets += tx_buffer_info->gso_segs;
                        }
 
                        ixgbe_unmap_and_free_tx_resource(adapter,
                                                         tx_buffer_info);
-
-                       tx_desc->wb.status = 0;
-
-                       i++;
-                       if (i == tx_ring->count)
-                               i = 0;
                }
 
                eop = tx_ring->tx_buffer_info[i].next_to_watch;
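
The GSO/FCoE segment math that used to run here moves to ixgbe_tx_map()
(see the hunk around old line 6082 below), which executes once per skb at
transmit time; this cleanup loop, which runs per completed descriptor in
softirq context, now just sums two precomputed fields. Roughly, with field
names as in this diff:

    /* at map time, once per skb: */
    tx_ring->tx_buffer_info[i].bytecount = bytecount;  /* wire bytes incl. replicated hdrs */
    tx_ring->tx_buffer_info[i].gso_segs  = gso_segs;   /* 1 for non-TSO frames */

    /* at clean time: two loads, no skb_shinfo() chasing */
    total_bytes   += tx_buffer_info->bytecount;
    total_packets += tx_buffer_info->gso_segs;
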
@@ -1012,8 +991,7 @@ static inline void ixgbe_rx_checksum(struct ixgbe_adapter *adapter,
        skb->ip_summed = CHECKSUM_UNNECESSARY;
 }
 
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
-                                        struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
 {
        /*
         * Force memory writes to complete before letting h/w
@@ -1022,7 +1000,7 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
         * such as IA-64).
         */
        wmb();
-       IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+       writel(val, rx_ring->tail);
 }
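
This is the heart of the patch: ring->tail becomes a precomputed __iomem
pointer rather than a register offset, so every doorbell is a single
writel() with no per-packet address math and no need to drag a struct
ixgbe_hw pointer through the hot paths. A condensed before/after sketch
(the field type change lives in the companion ixgbe.h hunk, not shown in
this diff):

    /* struct ixgbe_ring: u32 head and u32 tail (register offsets) are
     * replaced by a single pointer:
     *         void __iomem *tail;
     */

    /* setup, once per ring (see ixgbe_configure_rx_ring below): */
    ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);

    /* old doorbell: macro indirection + pointer math every time */
    IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);

    /* new doorbell: one MMIO store */
    writel(val, rx_ring->tail);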
 
 /**
@@ -1031,63 +1009,70 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
  **/
 void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                            struct ixgbe_ring *rx_ring,
-                           int cleaned_count)
+                           u16 cleaned_count)
 {
-       struct net_device *netdev = adapter->netdev;
        struct pci_dev *pdev = adapter->pdev;
        union ixgbe_adv_rx_desc *rx_desc;
        struct ixgbe_rx_buffer *bi;
-       unsigned int i;
-       unsigned int bufsz = rx_ring->rx_buf_len;
-
-       i = rx_ring->next_to_use;
-       bi = &rx_ring->rx_buffer_info[i];
+       struct sk_buff *skb;
+       u16 i = rx_ring->next_to_use;
 
        while (cleaned_count--) {
                rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+               bi = &rx_ring->rx_buffer_info[i];
+               skb = bi->skb;
 
-               if (!bi->page_dma &&
-                   (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
-                       if (!bi->page) {
-                               bi->page = netdev_alloc_page(netdev);
-                               if (!bi->page) {
-                                       adapter->alloc_rx_page_failed++;
-                                       goto no_buffers;
-                               }
-                               bi->page_offset = 0;
-                       } else {
-                               /* use a half page if we're re-using */
-                               bi->page_offset ^= (PAGE_SIZE / 2);
-                       }
-
-                       bi->page_dma = dma_map_page(&pdev->dev, bi->page,
-                                                   bi->page_offset,
-                                                   (PAGE_SIZE / 2),
-                                                   DMA_FROM_DEVICE);
-               }
-
-               if (!bi->skb) {
-                       struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
-                                                                       bufsz);
-                       bi->skb = skb;
-
+               if (!skb) {
+                       skb = netdev_alloc_skb_ip_align(adapter->netdev,
+                                                       rx_ring->rx_buf_len);
                        if (!skb) {
                                adapter->alloc_rx_buff_failed++;
                                goto no_buffers;
                        }
                        /* initialize queue mapping */
                        skb_record_rx_queue(skb, rx_ring->queue_index);
+                       bi->skb = skb;
                }
 
                if (!bi->dma) {
                        bi->dma = dma_map_single(&pdev->dev,
-                                                bi->skb->data,
+                                                skb->data,
                                                 rx_ring->rx_buf_len,
                                                 DMA_FROM_DEVICE);
+                       if (dma_mapping_error(&pdev->dev, bi->dma)) {
+                               adapter->alloc_rx_buff_failed++;
+                               bi->dma = 0;
+                               goto no_buffers;
+                       }
                }
-               /* Refresh the desc even if buffer_addrs didn't change because
-                * each write-back erases this info. */
+
                if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+                       if (!bi->page) {
+                               bi->page = netdev_alloc_page(adapter->netdev);
+                               if (!bi->page) {
+                                       adapter->alloc_rx_page_failed++;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       if (!bi->page_dma) {
+                               /* use a half page if we're re-using */
+                               bi->page_offset ^= PAGE_SIZE / 2;
+                               bi->page_dma = dma_map_page(&pdev->dev,
+                                                           bi->page,
+                                                           bi->page_offset,
+                                                           PAGE_SIZE / 2,
+                                                           DMA_FROM_DEVICE);
+                               if (dma_mapping_error(&pdev->dev,
+                                                     bi->page_dma)) {
+                                       adapter->alloc_rx_page_failed++;
+                                       bi->page_dma = 0;
+                                       goto no_buffers;
+                               }
+                       }
+
+                       /* Refresh the desc even if buffer_addrs didn't change
+                        * because each write-back erases this info. */
                        rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
                        rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
                } else {
@@ -1098,16 +1083,12 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
                i++;
                if (i == rx_ring->count)
                        i = 0;
-               bi = &rx_ring->rx_buffer_info[i];
        }
 
 no_buffers:
        if (rx_ring->next_to_use != i) {
                rx_ring->next_to_use = i;
-               if (i-- == 0)
-                       i = (rx_ring->count - 1);
-
-               ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+               ixgbe_release_rx_desc(rx_ring, i);
        }
 }
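
New in this refill path: both DMA mappings are checked with
dma_mapping_error(), and the stored handle is zeroed on failure. The
zeroing matters because the if (!bi->dma) and if (!bi->page_dma) guards
treat 0 as "not mapped", so the next ixgbe_alloc_rx_buffers() call retries
the mapping instead of handing the hardware a stale address. The shape of
the pattern:

    bi->dma = dma_map_single(&pdev->dev, skb->data,
                             rx_ring->rx_buf_len, DMA_FROM_DEVICE);
    if (dma_mapping_error(&pdev->dev, bi->dma)) {
            adapter->alloc_rx_buff_failed++;
            bi->dma = 0;            /* re-arm the !bi->dma guard */
            goto no_buffers;        /* publish what was filled, retry later */
    }

Note the skb itself stays in bi->skb across the failure, so only the
mapping is redone on the retry.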
 
@@ -2483,8 +2464,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_tx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
-       ring->head = IXGBE_TDH(reg_idx);
-       ring->tail = IXGBE_TDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
 
        /* configure fetching thresholds */
        if (adapter->rx_itr_setting == 0) {
@@ -2809,8 +2789,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter,
                        ring->count * sizeof(union ixgbe_adv_rx_desc));
        IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
        IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
-       ring->head = IXGBE_RDH(reg_idx);
-       ring->tail = IXGBE_RDT(reg_idx);
+       ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
 
        ixgbe_configure_srrctl(adapter, ring);
        ixgbe_configure_rscctl(adapter, ring);
@@ -3365,9 +3344,9 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
                max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
 #endif
 
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_TX_CONFIG);
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+       ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
                                        DCB_RX_CONFIG);
 
        /* reconfigure the hardware */
@@ -3748,11 +3727,6 @@ static void ixgbe_clean_rx_ring(struct ixgbe_adapter *adapter,
 
        rx_ring->next_to_clean = 0;
        rx_ring->next_to_use = 0;
-
-       if (rx_ring->head)
-               writel(0, adapter->hw.hw_addr + rx_ring->head);
-       if (rx_ring->tail)
-               writel(0, adapter->hw.hw_addr + rx_ring->tail);
 }
 
 /**
@@ -3785,11 +3759,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_adapter *adapter,
 
        tx_ring->next_to_use = 0;
        tx_ring->next_to_clean = 0;
-
-       if (tx_ring->head)
-               writel(0, adapter->hw.hw_addr + tx_ring->head);
-       if (tx_ring->tail)
-               writel(0, adapter->hw.hw_addr + tx_ring->tail);
 }
 
 /**
@@ -4750,6 +4719,11 @@ err_set_interrupt:
        return err;
 }
 
+static void ring_free_rcu(struct rcu_head *head)
+{
+       kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
 /**
  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  * @adapter: board private structure to clear interrupt scheme on
@@ -4766,7 +4740,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               kfree(adapter->rx_ring[i]);
+               struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+               /* ixgbe_get_stats64() might access this ring, we must wait
+                * a grace period before freeing it.
+                */
+               call_rcu(&ring->rcu, ring_free_rcu);
                adapter->rx_ring[i] = NULL;
        }
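
Deferred freeing is needed because ixgbe_get_stats64() (reworked at the
bottom of this patch) walks adapter->rx_ring[] without any lock. The
complete pattern, condensed from this diff:

    /* teardown: queue the free, then unpublish the pointer */
    struct ixgbe_ring *ring = adapter->rx_ring[i];
    call_rcu(&ring->rcu, ring_free_rcu);        /* kfree via container_of() */
    adapter->rx_ring[i] = NULL;

    /* reader: see ixgbe_get_stats64() below */
    rcu_read_lock();
    ring = ACCESS_ONCE(adapter->rx_ring[i]);    /* may be NULL mid-teardown */
    if (ring) {
            /* ... fold ring->stats into the totals ... */
    }
    rcu_read_unlock();

    /* module exit: rcu_barrier() waits for all queued callbacks, so
     * ring_free_rcu() cannot run after the module text is gone.
     */

This relies on an rcu_head member in struct ixgbe_ring, added in the
companion header change (not shown in this diff).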
 
@@ -4843,6 +4822,7 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
        int j;
        struct tc_configuration *tc;
 #endif
+       int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* PCI config space info */
 
@@ -4919,8 +4899,8 @@ static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter)
 #ifdef CONFIG_DCB
        adapter->last_lfc_mode = hw->fc.current_mode;
 #endif
-       hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
-       hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
        hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
        hw->fc.send_xon = true;
        hw->fc.disable_fc_autoneg = false;
@@ -5182,6 +5162,7 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter)
 static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 {
        struct ixgbe_adapter *adapter = netdev_priv(netdev);
+       struct ixgbe_hw *hw = &adapter->hw;
        int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
 
        /* MTU < 68 is an error and causes problems on some kernels */
@@ -5192,6 +5173,9 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
        /* must set new MTU before calling down or up */
        netdev->mtu = new_mtu;
 
+       hw->fc.high_water = FC_HIGH_WATER(max_frame);
+       hw->fc.low_water = FC_LOW_WATER(max_frame);
+
        if (netif_running(netdev))
                ixgbe_reinit_locked(adapter);
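
The watermark recompute on MTU change keeps the pause thresholds in line
with the largest frame the ring can now receive. max_frame is the full
on-wire size; for example, at the default MTU of 1500:

    int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
    /* 1500 (L3 payload) + 14 (Ethernet header) + 4 (FCS) = 1518 bytes */

FC_HIGH_WATER() and FC_LOW_WATER() themselves are defined in the ixgbe
headers (not shown in this diff); what matters here is that they are
functions of max_frame, so they must be re-evaluated both at init (the
ixgbe_sw_init hunk above) and on every MTU change.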
 
@@ -5823,7 +5807,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, u8 *hdr_len)
+                    u32 tx_flags, u8 *hdr_len, __be16 protocol)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -5841,7 +5825,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                l4len = tcp_hdrlen(skb);
                *hdr_len += l4len;
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->tot_len = 0;
                        iph->check = 0;
@@ -5880,7 +5864,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
                                   IXGBE_ADVTXD_DTYP_CTXT);
 
-               if (skb->protocol == htons(ETH_P_IP))
+               if (protocol == htons(ETH_P_IP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5906,16 +5890,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
        return false;
 }
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+                     __be16 protocol)
 {
        u32 rtn = 0;
-       __be16 protocol;
-
-       if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               protocol = ((const struct vlan_ethhdr *)skb->data)->
-                                       h_vlan_encapsulated_proto;
-       else
-               protocol = skb->protocol;
 
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
@@ -5943,7 +5921,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
        default:
                if (unlikely(net_ratelimit()))
                        e_warn(probe, "partial checksum but proto=%x!\n",
-                              skb->protocol);
+                              protocol);
                break;
        }
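
The VLAN peek removed here is exactly what vlan_get_protocol() provides;
the transmit path now resolves the L3 protocol once in
ixgbe_xmit_frame_ring() and threads it through the tso/csum/atr helpers
instead of re-deriving it in each one. Behaviorally the helper matches the
removed code, as in this sketch (hypothetical name, same logic):

    static __be16 tx_l3_protocol(const struct sk_buff *skb)
    {
            if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
                    return ((const struct vlan_ethhdr *)skb->data)
                                    ->h_vlan_encapsulated_proto;
            return skb->protocol;
    }
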
 
@@ -5952,7 +5930,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
-                         struct sk_buff *skb, u32 tx_flags)
+                         struct sk_buff *skb, u32 tx_flags,
+                         __be16 protocol)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -5981,7 +5960,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                                    IXGBE_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL)
-                       type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+                       type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
                /* use index zero for tx checksum offload */
@@ -6004,7 +5983,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
 static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        struct ixgbe_ring *tx_ring,
                        struct sk_buff *skb, u32 tx_flags,
-                       unsigned int first)
+                       unsigned int first, const u8 hdr_len)
 {
        struct pci_dev *pdev = adapter->pdev;
        struct ixgbe_tx_buffer *tx_buffer_info;
@@ -6013,6 +5992,8 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
        unsigned int offset = 0, size, count = 0, i;
        unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
        unsigned int f;
+       unsigned int bytecount = skb->len;
+       u16 gso_segs = 1;
 
        i = tx_ring->next_to_use;
 
@@ -6082,6 +6063,19 @@ static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
                        break;
        }
 
+       if (tx_flags & IXGBE_TX_FLAGS_TSO)
+               gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+       /* adjust for FCoE Sequence Offload */
+       else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+               gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+                                       skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+       bytecount += (gso_segs - 1) * hdr_len;
+
+       /* multiply data chunks by size of headers */
+       tx_ring->tx_buffer_info[i].bytecount = bytecount;
+       tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
        tx_ring->tx_buffer_info[i].skb = skb;
        tx_ring->tx_buffer_info[first].next_to_watch = i;
 
@@ -6109,8 +6103,7 @@ dma_error:
        return 0;
 }
 
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
-                          struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
                           int tx_flags, int count, u32 paylen, u8 hdr_len)
 {
        union ixgbe_adv_tx_desc *tx_desc = NULL;
@@ -6175,11 +6168,11 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
        wmb();
 
        tx_ring->next_to_use = i;
-       writel(i, adapter->hw.hw_addr + tx_ring->tail);
+       writel(i, tx_ring->tail);
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-                     int queue, u32 tx_flags)
+                     int queue, u32 tx_flags, __be16 protocol)
 {
        struct ixgbe_atr_input atr_input;
        struct tcphdr *th;
@@ -6190,7 +6183,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
        u8 l4type = 0;
 
        /* Right now, we support IPv4 only */
-       if (skb->protocol != htons(ETH_P_IP))
+       if (protocol != htons(ETH_P_IP))
                return;
        /* check if we're UDP or TCP */
        if (iph->protocol == IPPROTO_TCP) {
@@ -6257,10 +6250,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int txq = smp_processor_id();
-
 #ifdef IXGBE_FCOE
-       if ((skb->protocol == htons(ETH_P_FCOE)) ||
-           (skb->protocol == htons(ETH_P_FIP))) {
+       __be16 protocol;
+
+       protocol = vlan_get_protocol(skb);
+
+       if ((protocol == htons(ETH_P_FCOE)) ||
+           (protocol == htons(ETH_P_FIP))) {
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                        txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
                        txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6303,6 +6299,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        int tso;
        int count = 0;
        unsigned int f;
+       __be16 protocol;
+
+       protocol = vlan_get_protocol(skb);
 
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
@@ -6323,8 +6322,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        /* for FCoE with DCB, we force the priority to what
         * was specified by the switch */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-           (skb->protocol == htons(ETH_P_FCOE) ||
-            skb->protocol == htons(ETH_P_FIP))) {
+           (protocol == htons(ETH_P_FCOE) ||
+            protocol == htons(ETH_P_FIP))) {
 #ifdef CONFIG_IXGBE_DCB
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6334,7 +6333,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                }
 #endif
                /* flag for FCoE offloads */
-               if (skb->protocol == htons(ETH_P_FCOE))
+               if (protocol == htons(ETH_P_FCOE))
                        tx_flags |= IXGBE_TX_FLAGS_FCOE;
        }
 #endif
@@ -6368,9 +6367,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                        tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
        } else {
-               if (skb->protocol == htons(ETH_P_IP))
+               if (protocol == htons(ETH_P_IP))
                        tx_flags |= IXGBE_TX_FLAGS_IPV4;
-               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+                               protocol);
                if (tso < 0) {
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
@@ -6378,12 +6378,13 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 
                if (tso)
                        tx_flags |= IXGBE_TX_FLAGS_TSO;
-               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+                                      protocol) &&
                         (skb->ip_summed == CHECKSUM_PARTIAL))
                        tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
 
-       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+       count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
        if (count) {
                /* add the ATR filter if ATR is on */
                if (tx_ring->atr_sample_rate) {
@@ -6392,15 +6393,14 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                             test_bit(__IXGBE_FDIR_INIT_DONE,
                                      &tx_ring->reinit_state)) {
                                ixgbe_atr(adapter, skb, tx_ring->queue_index,
-                                         tx_flags);
+                                         tx_flags, protocol);
                                tx_ring->atr_count = 0;
                        }
                }
                txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
                txq->tx_bytes += skb->len;
                txq->tx_packets++;
-               ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
-                              hdr_len);
+               ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
                ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
 
        } else {
@@ -6559,20 +6559,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
        /* accurate rx/tx bytes/packets stats */
        dev_txq_stats_fold(netdev, stats);
+       rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->rx_ring[i];
+               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
                u64 bytes, packets;
                unsigned int start;
 
-               do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       packets = ring->stats.packets;
-                       bytes   = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-               stats->rx_packets += packets;
-               stats->rx_bytes   += bytes;
+               if (ring) {
+                       do {
+                               start = u64_stats_fetch_begin_bh(&ring->syncp);
+                               packets = ring->stats.packets;
+                               bytes   = ring->stats.bytes;
+                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+                       stats->rx_packets += packets;
+                       stats->rx_bytes   += bytes;
+               }
        }
-
+       rcu_read_unlock();
        /* following stats updated by ixgbe_watchdog_task() */
        stats->multicast        = netdev->stats.multicast;
        stats->rx_errors        = netdev->stats.rx_errors;
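
Two mechanisms protect this loop: RCU pins each ring against the
call_rcu() teardown above (hence ACCESS_ONCE() and the NULL check), and the
u64_stats seqcount retry makes the 64-bit counters tear-free on 32-bit
hosts. The matching writer side, per the u64_stats_sync API (not part of
this diff):

    /* in the RX clean path, where the ring's counters are bumped */
    u64_stats_update_begin(&rx_ring->syncp);
    rx_ring->stats.packets += total_rx_packets;
    rx_ring->stats.bytes   += total_rx_bytes;
    u64_stats_update_end(&rx_ring->syncp);

On 64-bit kernels the syncp compiles away entirely; the _bh reader
variants exist because the writer runs in softirq context.
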
@@ -7278,6 +7281,7 @@ static void __exit ixgbe_exit_module(void)
        dca_unregister_notify(&dca_notifier);
 #endif
        pci_unregister_driver(&ixgbe_driver);
+       rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
 #ifdef CONFIG_IXGBE_DCA