ixgbe: delay rx_ring freeing
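The rx_ring array is read locklessly by ixgbe_get_stats64(), so a ring cannot be kfree()d immediately when the interrupt scheme is torn down: the pointer is cleared from adapter->rx_ring[], the memory is handed to call_rcu(), the stats reader holds rcu_read_lock() and skips NULL slots, and module unload waits with rcu_barrier(). The sketch below shows that pairing in isolation; it is illustrative only, and names such as demo_ring, demo_rings[], demo_read_stats() and demo_free_rings() do not exist in the driver.

/*
 * Minimal sketch of the call_rcu() scheme this patch applies to
 * adapter->rx_ring[].  All identifiers here are made up for the
 * example; only the pattern mirrors the driver changes below.
 */
#include <linux/compiler.h>	/* ACCESS_ONCE() */
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/types.h>

#define DEMO_NUM_RINGS 4

struct demo_ring {
	u64 packets;
	u64 bytes;
	struct rcu_head rcu;	/* like the rcu member used by ring_free_rcu() */
};

static struct demo_ring *demo_rings[DEMO_NUM_RINGS];

/* call_rcu() callback: runs only after all pre-existing readers are done */
static void demo_ring_free_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct demo_ring, rcu));
}

/* Reader side, modelled on ixgbe_get_stats64(): read the slot once,
 * tolerate NULL, and stay inside rcu_read_lock() while touching it.
 */
static void demo_read_stats(u64 *packets, u64 *bytes)
{
	int i;

	*packets = 0;
	*bytes = 0;

	rcu_read_lock();
	for (i = 0; i < DEMO_NUM_RINGS; i++) {
		struct demo_ring *ring = ACCESS_ONCE(demo_rings[i]);

		if (ring) {
			*packets += ring->packets;
			*bytes   += ring->bytes;
		}
	}
	rcu_read_unlock();
}

/* Updater side, modelled on ixgbe_clear_interrupt_scheme(): defer the
 * kfree() past a grace period, then unpublish the pointer.
 */
static void demo_free_rings(void)
{
	int i;

	for (i = 0; i < DEMO_NUM_RINGS; i++) {
		struct demo_ring *ring = demo_rings[i];

		if (!ring)
			continue;
		call_rcu(&ring->rcu, demo_ring_free_rcu);
		demo_rings[i] = NULL;
	}

	/* like ixgbe_exit_module(): wait for outstanding call_rcu() callbacks */
	rcu_barrier();
}
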
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index f85631263af8573f7f0ea96ac63e961abebe225b..a137f9dbaacd7ee90e410bdf246acedbb38faa7c 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -764,8 +764,9 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
 #ifdef IXGBE_FCOE
                                /* adjust for FCoE Sequence Offload */
                                if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
-                                   && (skb->protocol == htons(ETH_P_FCOE)) &&
-                                   skb_is_gso(skb)) {
+                                   && skb_is_gso(skb)
+                                   && vlan_get_protocol(skb) ==
+                                   htons(ETH_P_FCOE)) {
                                        hlen = skb_transport_offset(skb) +
                                                sizeof(struct fc_frame_header) +
                                                sizeof(struct fcoe_crc_eof);
@@ -3347,6 +3348,7 @@ static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter)
 static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
 {
        struct ixgbe_hw *hw = &adapter->hw;
+       int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
        u32 txdctl;
        int i, j;
 
@@ -3359,8 +3361,15 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter)
        if (hw->mac.type == ixgbe_mac_82598EB)
                netif_set_gso_max_size(adapter->netdev, 32768);
 
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_TX_CONFIG);
-       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, DCB_RX_CONFIG);
+#ifdef CONFIG_FCOE
+       if (adapter->netdev->features & NETIF_F_FCOE_MTU)
+               max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
+#endif
+
+       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+                                       DCB_TX_CONFIG);
+       ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+                                       DCB_RX_CONFIG);
 
        /* reconfigure the hardware */
        ixgbe_dcb_hw_config(&adapter->hw, &adapter->dcb_cfg);
@@ -4742,6 +4751,11 @@ err_set_interrupt:
        return err;
 }
 
+static void ring_free_rcu(struct rcu_head *head)
+{
+       kfree(container_of(head, struct ixgbe_ring, rcu));
+}
+
 /**
  * ixgbe_clear_interrupt_scheme - Clear the current interrupt scheme settings
  * @adapter: board private structure to clear interrupt scheme on
@@ -4758,7 +4772,12 @@ void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter)
                adapter->tx_ring[i] = NULL;
        }
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               kfree(adapter->rx_ring[i]);
+               struct ixgbe_ring *ring = adapter->rx_ring[i];
+
+               /* ixgbe_get_stats64() might access this ring, we must wait
+                * a grace period before freeing it.
+                */
+               call_rcu(&ring->rcu, ring_free_rcu);
                adapter->rx_ring[i] = NULL;
        }
 
@@ -5815,7 +5834,7 @@ static void ixgbe_watchdog_task(struct work_struct *work)
 
 static int ixgbe_tso(struct ixgbe_adapter *adapter,
                     struct ixgbe_ring *tx_ring, struct sk_buff *skb,
-                    u32 tx_flags, u8 *hdr_len)
+                    u32 tx_flags, u8 *hdr_len, __be16 protocol)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -5833,7 +5852,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                l4len = tcp_hdrlen(skb);
                *hdr_len += l4len;
 
-               if (skb->protocol == htons(ETH_P_IP)) {
+               if (protocol == htons(ETH_P_IP)) {
                        struct iphdr *iph = ip_hdr(skb);
                        iph->tot_len = 0;
                        iph->check = 0;
@@ -5872,7 +5891,7 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
                type_tucmd_mlhl = (IXGBE_TXD_CMD_DEXT |
                                   IXGBE_ADVTXD_DTYP_CTXT);
 
-               if (skb->protocol == htons(ETH_P_IP))
+               if (protocol == htons(ETH_P_IP))
                        type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_IPV4;
                type_tucmd_mlhl |= IXGBE_ADVTXD_TUCMD_L4T_TCP;
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
@@ -5898,16 +5917,10 @@ static int ixgbe_tso(struct ixgbe_adapter *adapter,
        return false;
 }
 
-static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
+static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb,
+                     __be16 protocol)
 {
        u32 rtn = 0;
-       __be16 protocol;
-
-       if (skb->protocol == cpu_to_be16(ETH_P_8021Q))
-               protocol = ((const struct vlan_ethhdr *)skb->data)->
-                                       h_vlan_encapsulated_proto;
-       else
-               protocol = skb->protocol;
 
        switch (protocol) {
        case cpu_to_be16(ETH_P_IP):
@@ -5935,7 +5948,7 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
        default:
                if (unlikely(net_ratelimit()))
                        e_warn(probe, "partial checksum but proto=%x!\n",
-                              skb->protocol);
+                              protocol);
                break;
        }
 
@@ -5944,7 +5957,8 @@ static u32 ixgbe_psum(struct ixgbe_adapter *adapter, struct sk_buff *skb)
 
 static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                          struct ixgbe_ring *tx_ring,
-                         struct sk_buff *skb, u32 tx_flags)
+                         struct sk_buff *skb, u32 tx_flags,
+                         __be16 protocol)
 {
        struct ixgbe_adv_tx_context_desc *context_desc;
        unsigned int i;
@@ -5973,7 +5987,7 @@ static bool ixgbe_tx_csum(struct ixgbe_adapter *adapter,
                                    IXGBE_ADVTXD_DTYP_CTXT);
 
                if (skb->ip_summed == CHECKSUM_PARTIAL)
-                       type_tucmd_mlhl |= ixgbe_psum(adapter, skb);
+                       type_tucmd_mlhl |= ixgbe_psum(adapter, skb, protocol);
 
                context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd_mlhl);
                /* use index zero for tx checksum offload */
@@ -6171,7 +6185,7 @@ static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
 }
 
 static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
-                     int queue, u32 tx_flags)
+                     int queue, u32 tx_flags, __be16 protocol)
 {
        struct ixgbe_atr_input atr_input;
        struct tcphdr *th;
@@ -6182,7 +6196,7 @@ static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
        u8 l4type = 0;
 
        /* Right now, we support IPv4 only */
-       if (skb->protocol != htons(ETH_P_IP))
+       if (protocol != htons(ETH_P_IP))
                return;
        /* check if we're UDP or TCP */
        if (iph->protocol == IPPROTO_TCP) {
@@ -6249,10 +6263,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb)
 {
        struct ixgbe_adapter *adapter = netdev_priv(dev);
        int txq = smp_processor_id();
-
 #ifdef IXGBE_FCOE
-       if ((skb->protocol == htons(ETH_P_FCOE)) ||
-           (skb->protocol == htons(ETH_P_FIP))) {
+       __be16 protocol;
+
+       protocol = vlan_get_protocol(skb);
+
+       if ((protocol == htons(ETH_P_FCOE)) ||
+           (protocol == htons(ETH_P_FIP))) {
                if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
                        txq &= (adapter->ring_feature[RING_F_FCOE].indices - 1);
                        txq += adapter->ring_feature[RING_F_FCOE].mask;
@@ -6295,6 +6312,9 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        int tso;
        int count = 0;
        unsigned int f;
+       __be16 protocol;
+
+       protocol = vlan_get_protocol(skb);
 
        if (vlan_tx_tag_present(skb)) {
                tx_flags |= vlan_tx_tag_get(skb);
@@ -6315,8 +6335,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
        /* for FCoE with DCB, we force the priority to what
         * was specified by the switch */
        if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED &&
-           (skb->protocol == htons(ETH_P_FCOE) ||
-            skb->protocol == htons(ETH_P_FIP))) {
+           (protocol == htons(ETH_P_FCOE) ||
+            protocol == htons(ETH_P_FIP))) {
 #ifdef CONFIG_IXGBE_DCB
                if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) {
                        tx_flags &= ~(IXGBE_TX_FLAGS_VLAN_PRIO_MASK
@@ -6326,7 +6346,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                }
 #endif
                /* flag for FCoE offloads */
-               if (skb->protocol == htons(ETH_P_FCOE))
+               if (protocol == htons(ETH_P_FCOE))
                        tx_flags |= IXGBE_TX_FLAGS_FCOE;
        }
 #endif
@@ -6360,9 +6380,10 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                        tx_flags |= IXGBE_TX_FLAGS_FSO;
 #endif /* IXGBE_FCOE */
        } else {
-               if (skb->protocol == htons(ETH_P_IP))
+               if (protocol == htons(ETH_P_IP))
                        tx_flags |= IXGBE_TX_FLAGS_IPV4;
-               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len);
+               tso = ixgbe_tso(adapter, tx_ring, skb, tx_flags, &hdr_len,
+                               protocol);
                if (tso < 0) {
                        dev_kfree_skb_any(skb);
                        return NETDEV_TX_OK;
@@ -6370,7 +6391,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
 
                if (tso)
                        tx_flags |= IXGBE_TX_FLAGS_TSO;
-               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags) &&
+               else if (ixgbe_tx_csum(adapter, tx_ring, skb, tx_flags,
+                                      protocol) &&
                         (skb->ip_summed == CHECKSUM_PARTIAL))
                        tx_flags |= IXGBE_TX_FLAGS_CSUM;
        }
@@ -6384,7 +6406,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev
                             test_bit(__IXGBE_FDIR_INIT_DONE,
                                      &tx_ring->reinit_state)) {
                                ixgbe_atr(adapter, skb, tx_ring->queue_index,
-                                         tx_flags);
+                                         tx_flags, protocol);
                                tx_ring->atr_count = 0;
                        }
                }
@@ -6551,20 +6573,23 @@ static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev,
 
        /* accurate rx/tx bytes/packets stats */
        dev_txq_stats_fold(netdev, stats);
+       rcu_read_lock();
        for (i = 0; i < adapter->num_rx_queues; i++) {
-               struct ixgbe_ring *ring = adapter->rx_ring[i];
+               struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]);
                u64 bytes, packets;
                unsigned int start;
 
-               do {
-                       start = u64_stats_fetch_begin_bh(&ring->syncp);
-                       packets = ring->stats.packets;
-                       bytes   = ring->stats.bytes;
-               } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
-               stats->rx_packets += packets;
-               stats->rx_bytes   += bytes;
+               if (ring) {
+                       do {
+                               start = u64_stats_fetch_begin_bh(&ring->syncp);
+                               packets = ring->stats.packets;
+                               bytes   = ring->stats.bytes;
+                       } while (u64_stats_fetch_retry_bh(&ring->syncp, start));
+                       stats->rx_packets += packets;
+                       stats->rx_bytes   += bytes;
+               }
        }
-
+       rcu_read_unlock();
        /* following stats updated by ixgbe_watchdog_task() */
        stats->multicast        = netdev->stats.multicast;
        stats->rx_errors        = netdev->stats.rx_errors;
@@ -7270,6 +7295,7 @@ static void __exit ixgbe_exit_module(void)
        dca_unregister_notify(&dca_notifier);
 #endif
        pci_unregister_driver(&ixgbe_driver);
+       rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
 
 #ifdef CONFIG_IXGBE_DCA