/* Detect a transmit hang in hardware; this serializes the
* check with the clearing of time_stamp and movement of eop */
- adapter->detect_tx_hung = false;
+ clear_check_for_tx_hang(tx_ring);
if (tx_ring->tx_buffer_info[eop].time_stamp &&
time_after(jiffies, tx_ring->tx_buffer_info[eop].time_stamp + HZ) &&
ixgbe_tx_xon_state(adapter, tx_ring)) {
struct ixgbe_adapter *adapter = q_vector->adapter;
union ixgbe_adv_tx_desc *tx_desc, *eop_desc;
struct ixgbe_tx_buffer *tx_buffer_info;
- unsigned int i, eop, count = 0;
unsigned int total_bytes = 0, total_packets = 0;
+ u16 i, eop, count = 0;
i = tx_ring->next_to_clean;
eop = tx_ring->tx_buffer_info[i].next_to_watch;
}
tx_ring->next_to_clean = i;
+ tx_ring->total_bytes += total_bytes;
+ tx_ring->total_packets += total_packets;
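+ /* u64_stats_sync lets 32-bit readers see consistent 64-bit counters */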
+ u64_stats_update_begin(&tx_ring->syncp);
+ tx_ring->stats.packets += total_packets;
+ tx_ring->stats.bytes += total_bytes;
+ u64_stats_update_end(&tx_ring->syncp);
+
+ if (check_for_tx_hang(tx_ring) &&
+ ixgbe_check_tx_hang(adapter, tx_ring, i)) {
+ /* schedule immediate reset if we believe we hung */
+ e_info(probe, "tx hang %d detected, resetting "
+ "adapter\n", adapter->tx_timeout_count + 1);
+ ixgbe_tx_timeout(adapter->netdev);
+
+ /* the adapter is about to reset; no point in re-enabling the queue */
+ return true;
+ }
#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
if (unlikely(count && netif_carrier_ok(tx_ring->netdev) &&
}
}
- if (adapter->detect_tx_hung) {
- if (ixgbe_check_tx_hang(adapter, tx_ring, i)) {
- /* schedule immediate reset if we believe we hung */
- e_info(probe, "tx hang %d detected, resetting "
- "adapter\n", adapter->tx_timeout_count + 1);
- ixgbe_tx_timeout(adapter->netdev);
- }
- }
-
- /* re-arm the interrupt */
- if (count >= tx_ring->work_limit)
- ixgbe_irq_rearm_queues(adapter, ((u64)1 << q_vector->v_idx));
-
- tx_ring->total_bytes += total_bytes;
- tx_ring->total_packets += total_packets;
- u64_stats_update_begin(&tx_ring->syncp);
- tx_ring->stats.packets += total_packets;
- tx_ring->stats.bytes += total_bytes;
- u64_stats_update_end(&tx_ring->syncp);
return count < tx_ring->work_limit;
}
#ifdef CONFIG_IXGBE_DCA
static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *rx_ring)
+ struct ixgbe_ring *rx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 rxctrl;
- int cpu = get_cpu();
- int q = rx_ring->reg_idx;
+ u8 reg_idx = rx_ring->reg_idx;
- if (rx_ring->cpu != cpu) {
- rxctrl = IXGBE_READ_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q));
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
- rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
- rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
- }
- rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
- rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
- rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
- IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_RXCTRL(q), rxctrl);
- rx_ring->cpu = cpu;
+ rxctrl = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(reg_idx));
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK;
+ rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ break;
+ case ixgbe_mac_82599EB:
+ rxctrl &= ~IXGBE_DCA_RXCTRL_CPUID_MASK_82599;
+ rxctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599);
+ break;
+ default:
+ break;
}
- put_cpu();
+ rxctrl |= IXGBE_DCA_RXCTRL_DESC_DCA_EN;
+ rxctrl |= IXGBE_DCA_RXCTRL_HEAD_DCA_EN;
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_RRO_EN);
+ rxctrl &= ~(IXGBE_DCA_RXCTRL_DESC_WRO_EN |
+ IXGBE_DCA_RXCTRL_DESC_HSRO_EN);
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl);
}
static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring)
+ struct ixgbe_ring *tx_ring,
+ int cpu)
{
+ struct ixgbe_hw *hw = &adapter->hw;
u32 txctrl;
+ u8 reg_idx = tx_ring->reg_idx;
+
+ switch (hw->mac.type) {
+ case ixgbe_mac_82598EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
+ txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(reg_idx), txctrl);
+ break;
+ case ixgbe_mac_82599EB:
+ txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx));
+ txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
+ txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
+ IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
+ txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
+ txctrl &= ~IXGBE_DCA_TXCTRL_TX_WB_RO_EN;
+ IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(reg_idx), txctrl);
+ break;
+ default:
+ break;
+ }
+}
+
+static void ixgbe_update_dca(struct ixgbe_q_vector *q_vector)
+{
+ struct ixgbe_adapter *adapter = q_vector->adapter;
int cpu = get_cpu();
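+ /* get_cpu() disables preemption until the matching put_cpu() */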
- int q = tx_ring->reg_idx;
- struct ixgbe_hw *hw = &adapter->hw;
+ long r_idx;
+ int i;
- if (tx_ring->cpu != cpu) {
- if (adapter->hw.mac.type == ixgbe_mac_82598EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK;
- txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL(q), txctrl);
- } else if (adapter->hw.mac.type == ixgbe_mac_82599EB) {
- txctrl = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL_82599(q));
- txctrl &= ~IXGBE_DCA_TXCTRL_CPUID_MASK_82599;
- txctrl |= (dca3_get_tag(&adapter->pdev->dev, cpu) <<
- IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599);
- txctrl |= IXGBE_DCA_TXCTRL_DESC_DCA_EN;
- IXGBE_WRITE_REG(hw, IXGBE_DCA_TXCTRL_82599(q), txctrl);
- }
- tx_ring->cpu = cpu;
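+ /* nothing to do if this vector is still on the same CPU */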
+ if (q_vector->cpu == cpu)
+ goto out_no_update;
+
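+ /* retag each Tx ring, then each Rx ring, owned by this vector */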
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ for (i = 0; i < q_vector->txr_count; i++) {
+ ixgbe_update_tx_dca(adapter, adapter->tx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
+ r_idx + 1);
}
+
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ for (i = 0; i < q_vector->rxr_count; i++) {
+ ixgbe_update_rx_dca(adapter, adapter->rx_ring[r_idx], cpu);
+ r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
+ r_idx + 1);
+ }
+
+ q_vector->cpu = cpu;
+out_no_update:
put_cpu();
}
static void ixgbe_setup_dca(struct ixgbe_adapter *adapter)
{
+ int num_q_vectors;
int i;
if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
/* always use CB2 mode; the difference is masked in the CB driver */
IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2);
- for (i = 0; i < adapter->num_tx_queues; i++) {
- adapter->tx_ring[i]->cpu = -1;
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[i]);
- }
- for (i = 0; i < adapter->num_rx_queues; i++) {
- adapter->rx_ring[i]->cpu = -1;
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[i]);
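+ /* one vector per queue pair under MSI-X; a single vector otherwise */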
+ if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED)
+ num_q_vectors = adapter->num_msix_vectors - NON_Q_VECTORS;
+ else
+ num_q_vectors = 1;
+
+ for (i = 0; i < num_q_vectors; i++) {
+ adapter->q_vector[i]->cpu = -1;
+ ixgbe_update_dca(adapter->q_vector[i]);
}
}
struct ixgbe_adapter *adapter = dev_get_drvdata(dev);
unsigned long event = *(unsigned long *)data;
+ if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED))
+ return 0;
+
switch (event) {
case DCA_PROVIDER_ADD:
/* if we're already enabled, don't do it again */
}
}
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(rx_ring)) {
if (!bi->page) {
bi->page = netdev_alloc_page(rx_ring->netdev);
if (!bi->page) {
}
}
-static inline u16 ixgbe_get_hdr_info(union ixgbe_adv_rx_desc *rx_desc)
-{
- return rx_desc->wb.lower.lo_dword.hs_rss.hdr_info;
-}
-
-static inline u16 ixgbe_get_pkt_info(union ixgbe_adv_rx_desc *rx_desc)
+static inline u16 ixgbe_get_hlen(union ixgbe_adv_rx_desc *rx_desc)
{
- return rx_desc->wb.lower.lo_dword.hs_rss.pkt_info;
+ /* HW will not DMA in data larger than the given buffer, even if it
+ * parses the (NFS, of course) header to be larger. In that case, it
+ * fills the header buffer and spills the rest into the page.
+ */
+ u16 hdr_info = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.hdr_info);
+ u16 hlen = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
+ IXGBE_RXDADV_HDRBUFLEN_SHIFT;
+ if (hlen > IXGBE_RX_HDR_SIZE)
+ hlen = IXGBE_RX_HDR_SIZE;
+ return hlen;
}
static inline u32 ixgbe_get_rsc_count(union ixgbe_adv_rx_desc *rx_desc)
#define IXGBE_RSC_CB(skb) ((struct ixgbe_rsc_cb *)(skb)->cb)
-static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
+static void ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
struct ixgbe_ring *rx_ring,
int *work_done, int work_to_do)
{
union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
struct sk_buff *skb;
- unsigned int i, rsc_count = 0;
- u32 len, staterr;
- u16 hdr_info;
- bool cleaned = false;
- int cleaned_count = 0;
unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+ const int current_node = numa_node_id();
+ unsigned int rsc_count = 0;
#ifdef IXGBE_FCOE
int ddp_bytes = 0;
#endif /* IXGBE_FCOE */
+ u32 staterr;
+ u16 i;
+ u16 cleaned_count = 0;
i = rx_ring->next_to_clean;
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
while (staterr & IXGBE_RXD_STAT_DD) {
u32 upper_len = 0;
- if (*work_done >= work_to_do)
- break;
- (*work_done)++;
rmb(); /* read descriptor and rx_buffer_info after status DD */
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
- hdr_info = le16_to_cpu(ixgbe_get_hdr_info(rx_desc));
- len = (hdr_info & IXGBE_RXDADV_HDRBUFLEN_MASK) >>
- IXGBE_RXDADV_HDRBUFLEN_SHIFT;
- upper_len = le16_to_cpu(rx_desc->wb.upper.length);
- if ((len > IXGBE_RX_HDR_SIZE) ||
- (upper_len && !(hdr_info & IXGBE_RXDADV_SPH)))
- len = IXGBE_RX_HDR_SIZE;
- } else {
- len = le16_to_cpu(rx_desc->wb.upper.length);
- }
- cleaned = true;
+ rx_buffer_info = &rx_ring->rx_buffer_info[i];
+
skb = rx_buffer_info->skb;
- prefetch(skb->data);
rx_buffer_info->skb = NULL;
+ prefetch(skb->data);
+
+ if (ring_is_rsc_enabled(rx_ring))
+ rsc_count = ixgbe_get_rsc_count(rx_desc);
+ /* if this is an skb from a previous receive, DMA will be 0 */
if (rx_buffer_info->dma) {
- if ((adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) &&
- (!(staterr & IXGBE_RXD_STAT_EOP)) &&
- (!(skb->prev))) {
+ u16 hlen;
+ if (rsc_count &&
+ !(staterr & IXGBE_RXD_STAT_EOP) &&
+ !skb->prev) {
/*
* When HWRSC is enabled, delay unmapping
* of the first packet. It carries the
DMA_FROM_DEVICE);
}
rx_buffer_info->dma = 0;
- skb_put(skb, len);
+
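+ /* with packet split, hdr_info holds the header length; otherwise upper.length covers the whole packet */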
+ if (ring_is_ps_enabled(rx_ring)) {
+ hlen = ixgbe_get_hlen(rx_desc);
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
+ } else {
+ hlen = le16_to_cpu(rx_desc->wb.upper.length);
+ }
+
+ skb_put(skb, hlen);
+ } else {
+ /* assume packet split since header is unmapped */
+ upper_len = le16_to_cpu(rx_desc->wb.upper.length);
}
if (upper_len) {
rx_buffer_info->page_offset,
upper_len);
- if ((rx_ring->rx_buf_len > (PAGE_SIZE / 2)) ||
- (page_count(rx_buffer_info->page) != 1))
- rx_buffer_info->page = NULL;
- else
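+ /* recycle the half-page only if we hold the sole reference and it is on our NUMA node */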
+ if ((page_count(rx_buffer_info->page) == 1) &&
+ (page_to_nid(rx_buffer_info->page) == current_node))
get_page(rx_buffer_info->page);
+ else
+ rx_buffer_info->page = NULL;
skb->len += upper_len;
skb->data_len += upper_len;
prefetch(next_rxd);
cleaned_count++;
- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)
- rsc_count = ixgbe_get_rsc_count(rx_desc);
-
if (rsc_count) {
u32 nextp = (staterr & IXGBE_RXDADV_NEXTP_MASK) >>
IXGBE_RXDADV_NEXTP_SHIFT;
next_buffer = &rx_ring->rx_buffer_info[i];
}
- if (staterr & IXGBE_RXD_STAT_EOP) {
- if (skb->prev)
- skb = ixgbe_transform_rsc_queue(skb,
- &(rx_ring->rx_stats.rsc_count));
- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) {
- if (IXGBE_RSC_CB(skb)->delay_unmap) {
- dma_unmap_single(rx_ring->dev,
- IXGBE_RSC_CB(skb)->dma,
- rx_ring->rx_buf_len,
- DMA_FROM_DEVICE);
- IXGBE_RSC_CB(skb)->dma = 0;
- IXGBE_RSC_CB(skb)->delay_unmap = false;
- }
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)
- rx_ring->rx_stats.rsc_count +=
- skb_shinfo(skb)->nr_frags;
- else
- rx_ring->rx_stats.rsc_count++;
- rx_ring->rx_stats.rsc_flush++;
- }
- u64_stats_update_begin(&rx_ring->syncp);
- rx_ring->stats.packets++;
- rx_ring->stats.bytes += skb->len;
- u64_stats_update_end(&rx_ring->syncp);
- } else {
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (!(staterr & IXGBE_RXD_STAT_EOP)) {
+ if (ring_is_ps_enabled(rx_ring)) {
rx_buffer_info->skb = next_buffer->skb;
rx_buffer_info->dma = next_buffer->dma;
next_buffer->skb = skb;
goto next_desc;
}
+ if (skb->prev)
+ skb = ixgbe_transform_rsc_queue(skb,
+ &(rx_ring->rx_stats.rsc_count));
+
+ if (ring_is_rsc_enabled(rx_ring)) {
+ if (IXGBE_RSC_CB(skb)->delay_unmap) {
+ dma_unmap_single(rx_ring->dev,
+ IXGBE_RSC_CB(skb)->dma,
+ rx_ring->rx_buf_len,
+ DMA_FROM_DEVICE);
+ IXGBE_RSC_CB(skb)->dma = 0;
+ IXGBE_RSC_CB(skb)->delay_unmap = false;
+ }
+ if (ring_is_ps_enabled(rx_ring))
+ rx_ring->rx_stats.rsc_count +=
+ skb_shinfo(skb)->nr_frags;
+ else
+ rx_ring->rx_stats.rsc_count++;
+ rx_ring->rx_stats.rsc_flush++;
+ }
+
+ /* ERR_MASK will only have valid bits if EOP is set */
if (staterr & IXGBE_RXDADV_ERR_FRAME_ERR_MASK) {
- dev_kfree_skb_irq(skb);
+ /* trim packet back to size 0 and recycle it */
+ __pskb_trim(skb, 0);
+ rx_buffer_info->skb = skb;
goto next_desc;
}
next_desc:
rx_desc->wb.upper.status_error = 0;
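+ /* charge the descriptor against the NAPI budget only once it is fully processed */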
+ (*work_done)++;
+ if (*work_done >= work_to_do)
+ break;
+
/* return some buffers to hardware; one at a time is too slow */
if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) {
ixgbe_alloc_rx_buffers(rx_ring, cleaned_count);
/* use prefetched values */
rx_desc = next_rxd;
- rx_buffer_info = &rx_ring->rx_buffer_info[i];
-
staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
}
rx_ring->total_packets += total_rx_packets;
rx_ring->total_bytes += total_rx_bytes;
-
- return cleaned;
+ u64_stats_update_begin(&rx_ring->syncp);
+ rx_ring->stats.packets += total_rx_packets;
+ rx_ring->stats.bytes += total_rx_bytes;
+ u64_stats_update_end(&rx_ring->syncp);
}
static int ixgbe_clean_rxonly(struct napi_struct *, int);
{
struct ixgbe_hw *hw = &adapter->hw;
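+ /* handle SDP2 and SDP1 independently; skip scheduling work if the adapter is going down */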
+ if (eicr & IXGBE_EICR_GPI_SDP2) {
+ /* Clear the interrupt */
+ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->sfp_config_module_task);
+ }
+
if (eicr & IXGBE_EICR_GPI_SDP1) {
/* Clear the interrupt */
IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1);
- schedule_work(&adapter->multispeed_fiber_task);
- } else if (eicr & IXGBE_EICR_GPI_SDP2) {
- /* Clear the interrupt */
- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2);
- schedule_work(&adapter->sfp_config_module_task);
- } else {
- /* Interrupt isn't for us... */
- return;
+ if (!test_bit(__IXGBE_DOWN, &adapter->state))
+ schedule_work(&adapter->multispeed_fiber_task);
}
}
for (i = 0; i < adapter->num_tx_queues; i++) {
struct ixgbe_ring *tx_ring =
adapter->tx_ring[i];
- if (test_and_clear_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state))
+ if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state))
schedule_work(&adapter->fdir_reinit_task);
}
}
int r_idx;
int i;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- for (i = 0; i < q_vector->rxr_count; i++) {
+ for (i = 0; i < q_vector->rxr_count; i++) {
rx_ring = adapter->rx_ring[r_idx];
rx_ring->total_bytes = 0;
rx_ring->total_packets = 0;
if (!q_vector->rxr_count)
return IRQ_HANDLED;
- /* disable interrupts on this vector only */
/* EIAM disabled interrupts (on this vector) for us */
napi_schedule(&q_vector->napi);
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
- rx_ring = adapter->rx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, rx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
+ rx_ring = adapter->rx_ring[r_idx];
+
ixgbe_clean_rx_irq(q_vector, rx_ring, &work_done, budget);
/* If all Rx work done, exit the polling mode */
long r_idx;
bool tx_clean_complete = true;
+#ifdef CONFIG_IXGBE_DCA
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
+#endif
+
r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
for (i = 0; i < q_vector->txr_count; i++) {
ring = adapter->tx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, ring);
-#endif
tx_clean_complete &= ixgbe_clean_tx_irq(q_vector, ring);
r_idx = find_next_bit(q_vector->txr_idx, adapter->num_tx_queues,
r_idx + 1);
r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
for (i = 0; i < q_vector->rxr_count; i++) {
ring = adapter->rx_ring[r_idx];
-#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_rx_dca(adapter, ring);
-#endif
ixgbe_clean_rx_irq(q_vector, ring, &work_done, budget);
r_idx = find_next_bit(q_vector->rxr_idx, adapter->num_rx_queues,
r_idx + 1);
int work_done = 0;
long r_idx;
- r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
- tx_ring = adapter->tx_ring[r_idx];
#ifdef CONFIG_IXGBE_DCA
if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
- ixgbe_update_tx_dca(adapter, tx_ring);
+ ixgbe_update_dca(q_vector);
#endif
+ r_idx = find_first_bit(q_vector->txr_idx, adapter->num_tx_queues);
+ tx_ring = adapter->tx_ring[r_idx];
+
if (!ixgbe_clean_tx_irq(q_vector, tx_ring))
work_done = budget;
} else if (handler == &ixgbe_msix_clean_tx) {
sprintf(adapter->name[vector], "%s-%s-%d",
netdev->name, "tx", ti++);
- } else
+ } else {
sprintf(adapter->name[vector], "%s-%s-%d",
- netdev->name, "TxRx", vector);
+ netdev->name, "TxRx", ri++);
+ ti++;
+ }
err = request_irq(adapter->msix_entries[vector].vector,
handler, 0, adapter->name[vector],
}
/* reinitialize flowdirector state */
- set_bit(__IXGBE_FDIR_INIT_DONE, &ring->reinit_state);
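+ /* only arm ATR sampling when flow director hashing is enabled */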
+ if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) &&
+ adapter->atr_sample_rate) {
+ ring->atr_sample_rate = adapter->atr_sample_rate;
+ ring->atr_count = 0;
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE, &ring->state);
+ } else {
+ ring->atr_sample_rate = 0;
+ }
/* enable queue */
txdctl |= IXGBE_TXDCTL_ENABLE;
srrctl |= (IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT) &
IXGBE_SRRCTL_BSIZEHDR_MASK;
- if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(rx_ring)) {
#if (PAGE_SIZE / 2) > IXGBE_MAX_RXBUFFER
srrctl |= IXGBE_MAX_RXBUFFER >> IXGBE_SRRCTL_BSIZEPKT_SHIFT;
#else
int rx_buf_len;
u16 reg_idx = ring->reg_idx;
- if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED))
+ if (!ring_is_rsc_enabled(ring))
return;
rx_buf_len = ring->rx_buf_len;
* total size of max desc * buf_len is not greater
* than 65535
*/
- if (ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (ring_is_ps_enabled(ring)) {
#if (MAX_SKB_FRAGS > 16)
rscctrl |= IXGBE_RSCCTL_MAXDESC_16;
#elif (MAX_SKB_FRAGS > 8)
rx_ring->rx_buf_len = rx_buf_len;
if (adapter->flags & IXGBE_FLAG_RX_PS_ENABLED)
- rx_ring->flags |= IXGBE_RING_RX_PS_ENABLED;
+ set_ring_ps_enabled(rx_ring);
+ else
+ clear_ring_ps_enabled(rx_ring);
+
+ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)
+ set_ring_rsc_enabled(rx_ring);
else
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_rsc_enabled(rx_ring);
#ifdef IXGBE_FCOE
if (netdev->features & NETIF_F_FCOE_MTU) {
struct ixgbe_ring_feature *f;
f = &adapter->ring_feature[RING_F_FCOE];
if ((i >= f->mask) && (i < f->mask + f->indices)) {
- rx_ring->flags &= ~IXGBE_RING_RX_PS_ENABLED;
+ clear_ring_ps_enabled(rx_ring);
if (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)
rx_ring->rx_buf_len =
IXGBE_FCOE_JUMBO_FRAME_SIZE;
+ } else if (!ring_is_rsc_enabled(rx_ring) &&
+ !ring_is_ps_enabled(rx_ring)) {
+ rx_ring->rx_buf_len =
+ IXGBE_FCOE_JUMBO_FRAME_SIZE;
}
}
#endif /* IXGBE_FCOE */
clear_bit(__IXGBE_DOWN, &adapter->state);
ixgbe_napi_enable_all(adapter);
+ if (ixgbe_is_sfp(hw)) {
+ ixgbe_sfp_link_config(adapter);
+ } else {
+ err = ixgbe_non_sfp_link_config(hw);
+ if (err)
+ e_err(probe, "link_config FAILED %d\n", err);
+ }
+
/* clear any pending interrupts, may auto mask */
IXGBE_READ_REG(hw, IXGBE_EICR);
ixgbe_irq_enable(adapter, true, true);
* If we're not hot-pluggable SFP+, we just need to configure link
* and bring it up.
*/
- if (hw->phy.type == ixgbe_phy_unknown) {
- err = hw->phy.ops.identify(hw);
- if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) {
- /*
- * Take the device down and schedule the sfp tasklet
- * which will unregister_netdev and log it.
- */
- ixgbe_down(adapter);
- schedule_work(&adapter->sfp_config_module_task);
- return err;
- }
- }
-
- if (ixgbe_is_sfp(hw)) {
- ixgbe_sfp_link_config(adapter);
- } else {
- err = ixgbe_non_sfp_link_config(hw);
- if (err)
- e_err(probe, "link_config FAILED %d\n", err);
- }
+ if (hw->phy.type == ixgbe_phy_unknown)
+ schedule_work(&adapter->sfp_config_module_task);
/* enable transmits */
netif_tx_start_all_queues(adapter->netdev);
int tx_clean_complete, work_done = 0;
#ifdef CONFIG_IXGBE_DCA
- if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) {
- ixgbe_update_tx_dca(adapter, adapter->tx_ring[0]);
- ixgbe_update_rx_dca(adapter, adapter->rx_ring[0]);
- }
+ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED)
+ ixgbe_update_dca(q_vector);
#endif
tx_clean_complete = ixgbe_clean_tx_irq(q_vector, adapter->tx_ring[0]);
if (ixgbe_reinit_fdir_tables_82599(hw) == 0) {
for (i = 0; i < adapter->num_tx_queues; i++)
- set_bit(__IXGBE_FDIR_INIT_DONE,
- &(adapter->tx_ring[i]->reinit_state));
+ set_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &(adapter->tx_ring[i]->state));
} else {
e_err(probe, "failed to finish FDIR re-initialization, "
"ignored adding FDIR ATR filters\n");
netif_carrier_on(netdev);
} else {
/* Force detection of hung controller */
- adapter->detect_tx_hung = true;
+ for (i = 0; i < adapter->num_tx_queues; i++) {
+ tx_ring = adapter->tx_ring[i];
+ set_check_for_tx_hang(tx_ring);
+ }
}
} else {
adapter->link_up = false;
}
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
- int queue, u32 tx_flags, __be16 protocol)
+ u8 queue, u32 tx_flags, __be16 protocol)
{
struct ixgbe_atr_input atr_input;
- struct tcphdr *th;
struct iphdr *iph = ip_hdr(skb);
struct ethhdr *eth = (struct ethhdr *)skb->data;
- u16 vlan_id, src_port, dst_port, flex_bytes;
- u32 src_ipv4_addr, dst_ipv4_addr;
- u8 l4type = 0;
+ struct tcphdr *th;
+ u16 vlan_id;
- /* Right now, we support IPv4 only */
- if (protocol != htons(ETH_P_IP))
- return;
- /* check if we're UDP or TCP */
- if (iph->protocol == IPPROTO_TCP) {
- th = tcp_hdr(skb);
- src_port = th->source;
- dst_port = th->dest;
- l4type |= IXGBE_ATR_L4TYPE_TCP;
- /* l4type IPv4 type is 0, no need to assign */
- } else {
- /* Unsupported L4 header, just bail here */
+ /* Right now, we support IPv4 w/ TCP only */
+ if (protocol != htons(ETH_P_IP) ||
+ iph->protocol != IPPROTO_TCP)
return;
- }
memset(&atr_input, 0, sizeof(struct ixgbe_atr_input));
vlan_id = (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK) >>
IXGBE_TX_FLAGS_VLAN_SHIFT;
- src_ipv4_addr = iph->saddr;
- dst_ipv4_addr = iph->daddr;
- flex_bytes = eth->h_proto;
+
+ th = tcp_hdr(skb);
ixgbe_atr_set_vlan_id_82599(&atr_input, vlan_id);
- ixgbe_atr_set_src_port_82599(&atr_input, dst_port);
- ixgbe_atr_set_dst_port_82599(&atr_input, src_port);
- ixgbe_atr_set_flex_byte_82599(&atr_input, flex_bytes);
- ixgbe_atr_set_l4type_82599(&atr_input, l4type);
+ ixgbe_atr_set_src_port_82599(&atr_input, th->dest);
+ ixgbe_atr_set_dst_port_82599(&atr_input, th->source);
+ ixgbe_atr_set_flex_byte_82599(&atr_input, eth->h_proto);
+ ixgbe_atr_set_l4type_82599(&atr_input, IXGBE_ATR_L4TYPE_TCP);
/* src and dst are inverted; think of how the receiver sees them */
- ixgbe_atr_set_src_ipv4_82599(&atr_input, dst_ipv4_addr);
- ixgbe_atr_set_dst_ipv4_82599(&atr_input, src_ipv4_addr);
+ ixgbe_atr_set_src_ipv4_82599(&atr_input, iph->daddr);
+ ixgbe_atr_set_dst_ipv4_82599(&atr_input, iph->saddr);
/* This assumes the Rx queue and Tx queue are bound to the same CPU */
ixgbe_fdir_add_signature_filter_82599(&adapter->hw, &atr_input, queue);
if (tx_ring->atr_sample_rate) {
++tx_ring->atr_count;
if ((tx_ring->atr_count >= tx_ring->atr_sample_rate) &&
- test_bit(__IXGBE_FDIR_INIT_DONE,
- &tx_ring->reinit_state)) {
+ test_bit(__IXGBE_TX_FDIR_INIT_DONE,
+ &tx_ring->state)) {
ixgbe_atr(adapter, skb, tx_ring->queue_index,
tx_flags, protocol);
tx_ring->atr_count = 0;