" time_stamp <%lx>\n"
" jiffies <%lx>\n",
tx_ring->queue_index,
- IXGBE_READ_REG(hw, tx_ring->head),
- IXGBE_READ_REG(hw, tx_ring->tail),
+ IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)),
+ IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)),
tx_ring->next_to_use, eop,
tx_ring->tx_buffer_info[eop].time_stamp, jiffies);
return true;
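
The hunk above is the Tx-hang report in ixgbe_clean_tx_irq(): instead of dereferencing head/tail register offsets cached in the ring, it derives the registers directly from the queue's reg_idx. For reference, a sketch of the per-queue register macros this relies on; the base offsets and 0x40 stride follow the pattern in ixgbe_type.h but are quoted from memory, so treat the values as assumptions:

	/* Per-queue Tx head/tail registers, ixgbe_type.h style: queue _i's
	 * registers sit at a fixed base plus a 0x40-byte stride, so reg_idx
	 * alone is enough to locate them (offsets here are assumptions).
	 */
	#define IXGBE_TDH(_i)	(0x06010 + ((_i) * 0x40))
	#define IXGBE_TDT(_i)	(0x06018 + ((_i) * 0x40))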
bool cleaned = false;
rmb(); /* read buffer_info after eop_desc */
for ( ; !cleaned; count++) {
- struct sk_buff *skb;
tx_desc = IXGBE_TX_DESC_ADV(tx_ring, i);
tx_buffer_info = &tx_ring->tx_buffer_info[i];
+
+ tx_desc->wb.status = 0;
cleaned = (i == eop);
- skb = tx_buffer_info->skb;
- if (cleaned && skb) {
- unsigned int segs, bytecount;
- unsigned int hlen = skb_headlen(skb);
+ i++;
+ if (i == tx_ring->count)
+ i = 0;
- /* gso_segs is currently only valid for tcp */
- segs = skb_shinfo(skb)->gso_segs ?: 1;
-#ifdef IXGBE_FCOE
- /* adjust for FCoE Sequence Offload */
- if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
- && skb_is_gso(skb)
- && vlan_get_protocol(skb) ==
- htons(ETH_P_FCOE)) {
- hlen = skb_transport_offset(skb) +
- sizeof(struct fc_frame_header) +
- sizeof(struct fcoe_crc_eof);
- segs = DIV_ROUND_UP(skb->len - hlen,
- skb_shinfo(skb)->gso_size);
- }
-#endif /* IXGBE_FCOE */
- /* multiply data chunks by size of headers */
- bytecount = ((segs - 1) * hlen) + skb->len;
- total_packets += segs;
- total_bytes += bytecount;
+ if (cleaned && tx_buffer_info->skb) {
+ total_bytes += tx_buffer_info->bytecount;
+ total_packets += tx_buffer_info->gso_segs;
}
ixgbe_unmap_and_free_tx_resource(adapter,
tx_buffer_info);
-
- tx_desc->wb.status = 0;
-
- i++;
- if (i == tx_ring->count)
- i = 0;
}
eop = tx_ring->tx_buffer_info[i].next_to_watch;
skb->ip_summed = CHECKSUM_UNNECESSARY;
}
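
With the accounting moved to transmit time (see the ixgbe_tx_map() hunk below), the cleanup loop reads two precomputed fields per packet instead of walking skb_shinfo() under TSO/FCoE special cases. A minimal sketch of the buffer-info fields this depends on; the field names match the diff, everything else is an assumption about the surrounding struct:

	/* Sketch (assumed layout) of the Tx bookkeeping consumed above. */
	struct ixgbe_tx_buffer {
		struct sk_buff *skb;	/* non-NULL on a packet's last descriptor */
		unsigned long time_stamp;	/* fed to the Tx-hang watchdog */
		u16 next_to_watch;	/* EOP descriptor index */
		unsigned int bytecount;	/* skb->len + replicated header bytes */
		u16 gso_segs;		/* wire frames this skb expands to */
		/* dma handle/length fields elided */
	};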
-static inline void ixgbe_release_rx_desc(struct ixgbe_hw *hw,
- struct ixgbe_ring *rx_ring, u32 val)
+static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val)
{
/*
 * Force memory writes to complete before letting h/w
 * know there are new descriptors to fetch.  (Only
 * applicable for weak-ordered memory model archs,
 * such as IA-64).
 */
wmb();
- IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
+ writel(val, rx_ring->tail);
}
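
Worth spelling out what the writel() conversion buys. IXGBE_WRITE_REG() is (assuming the usual macro shape) just writel() against hw->hw_addr plus a register offset, so storing the fully mapped tail address in the ring at configure time removes the hw pointer chase and the offset addition from every doorbell:

	/* Assumed shape of the register-write macro being bypassed: */
	#define IXGBE_WRITE_REG(a, reg, value) \
		writel((value), ((a)->hw_addr + (reg)))

	/* With ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx) precomputed,
	 * the two forms below are equivalent and the second is cheaper:
	 *	IXGBE_WRITE_REG(hw, IXGBE_RDT(rx_ring->reg_idx), val);
	 *	writel(val, rx_ring->tail);
	 */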
/**
 * ixgbe_alloc_rx_buffers - Replace used receive buffers; packet split
 * @adapter: address of board private structure
 **/
void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
struct ixgbe_ring *rx_ring,
- int cleaned_count)
+ u16 cleaned_count)
{
- struct net_device *netdev = adapter->netdev;
struct pci_dev *pdev = adapter->pdev;
union ixgbe_adv_rx_desc *rx_desc;
struct ixgbe_rx_buffer *bi;
- unsigned int i;
- unsigned int bufsz = rx_ring->rx_buf_len;
-
- i = rx_ring->next_to_use;
- bi = &rx_ring->rx_buffer_info[i];
+ struct sk_buff *skb;
+ u16 i = rx_ring->next_to_use;
while (cleaned_count--) {
rx_desc = IXGBE_RX_DESC_ADV(rx_ring, i);
+ bi = &rx_ring->rx_buffer_info[i];
+ skb = bi->skb;
- if (!bi->page_dma &&
- (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED)) {
- if (!bi->page) {
- bi->page = netdev_alloc_page(netdev);
- if (!bi->page) {
- adapter->alloc_rx_page_failed++;
- goto no_buffers;
- }
- bi->page_offset = 0;
- } else {
- /* use a half page if we're re-using */
- bi->page_offset ^= (PAGE_SIZE / 2);
- }
-
- bi->page_dma = dma_map_page(&pdev->dev, bi->page,
- bi->page_offset,
- (PAGE_SIZE / 2),
- DMA_FROM_DEVICE);
- }
-
- if (!bi->skb) {
- struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev,
- bufsz);
- bi->skb = skb;
-
+ if (!skb) {
+ skb = netdev_alloc_skb_ip_align(adapter->netdev,
+ rx_ring->rx_buf_len);
if (!skb) {
adapter->alloc_rx_buff_failed++;
goto no_buffers;
}
/* initialize queue mapping */
skb_record_rx_queue(skb, rx_ring->queue_index);
+ bi->skb = skb;
}
if (!bi->dma) {
bi->dma = dma_map_single(&pdev->dev,
- bi->skb->data,
+ skb->data,
rx_ring->rx_buf_len,
DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev, bi->dma)) {
+ adapter->alloc_rx_buff_failed++;
+ bi->dma = 0;
+ goto no_buffers;
+ }
}
- /* Refresh the desc even if buffer_addrs didn't change because
- * each write-back erases this info. */
+
if (rx_ring->flags & IXGBE_RING_RX_PS_ENABLED) {
+ if (!bi->page) {
+ bi->page = netdev_alloc_page(adapter->netdev);
+ if (!bi->page) {
+ adapter->alloc_rx_page_failed++;
+ goto no_buffers;
+ }
+ }
+
+ if (!bi->page_dma) {
+ /* use a half page if we're re-using */
+ bi->page_offset ^= PAGE_SIZE / 2;
+ bi->page_dma = dma_map_page(&pdev->dev,
+ bi->page,
+ bi->page_offset,
+ PAGE_SIZE / 2,
+ DMA_FROM_DEVICE);
+ if (dma_mapping_error(&pdev->dev,
+ bi->page_dma)) {
+ adapter->alloc_rx_page_failed++;
+ bi->page_dma = 0;
+ goto no_buffers;
+ }
+ }
+
+ /* Refresh the desc even if buffer_addrs didn't change
+ * because each write-back erases this info. */
rx_desc->read.pkt_addr = cpu_to_le64(bi->page_dma);
rx_desc->read.hdr_addr = cpu_to_le64(bi->dma);
} else {
rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
}
i++;
if (i == rx_ring->count)
i = 0;
- bi = &rx_ring->rx_buffer_info[i];
}
no_buffers:
if (rx_ring->next_to_use != i) {
rx_ring->next_to_use = i;
- if (i-- == 0)
- i = (rx_ring->count - 1);
-
- ixgbe_release_rx_desc(&adapter->hw, rx_ring, i);
+ ixgbe_release_rx_desc(rx_ring, i);
}
}
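
Two details in this refill rewrite deserve a note. First, it now honors the DMA-API rule that every dma_map_single()/dma_map_page() result must be screened with dma_mapping_error() before hardware sees the address (mappings really can fail under an IOMMU or swiotlb). Second, the half-page flip: one page backs two receive buffers, and XORing the offset alternates between the halves. A worked sketch of the page path, assuming 4 KiB pages:

	/* Half-page reuse, assuming PAGE_SIZE == 4096: the XOR walks the
	 * offset 0 -> 2048 -> 0 -> ..., so the second refill of a page
	 * maps its other half rather than allocating a fresh page.  The
	 * mapping is still checked before it is posted to a descriptor.
	 */
	bi->page_offset ^= PAGE_SIZE / 2;
	bi->page_dma = dma_map_page(&pdev->dev, bi->page, bi->page_offset,
				    PAGE_SIZE / 2, DMA_FROM_DEVICE);
	if (dma_mapping_error(&pdev->dev, bi->page_dma)) {
		adapter->alloc_rx_page_failed++;	/* retried next refill */
		bi->page_dma = 0;
		goto no_buffers;
	}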
ring->count * sizeof(union ixgbe_adv_tx_desc));
IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0);
- ring->head = IXGBE_TDH(reg_idx);
- ring->tail = IXGBE_TDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_TDT(reg_idx);
/* configure fetching thresholds */
if (adapter->rx_itr_setting == 0) {
ring->count * sizeof(union ixgbe_adv_rx_desc));
IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0);
IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0);
- ring->head = IXGBE_RDH(reg_idx);
- ring->tail = IXGBE_RDT(reg_idx);
+ ring->tail = hw->hw_addr + IXGBE_RDT(reg_idx);
ixgbe_configure_srrctl(adapter, ring);
ixgbe_configure_rscctl(adapter, ring);
max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE);
#endif
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_TX_CONFIG);
- ixgbe_dcb_calculate_tc_credits(&adapter->dcb_cfg, max_frame,
+ ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame,
DCB_RX_CONFIG);
/* reconfigure the hardware */
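
On the DCB hunk above: the credit helper now takes the hw pointer, presumably so the credit math can key off the MAC type rather than reaching for globals. A hypothetical view of the updated prototype, since the header change is not part of this excerpt (return and parameter types are assumptions):

	/* Assumed prototype after this patch (not shown in the excerpt): */
	s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw,
					   struct ixgbe_dcb_config *dcb_config,
					   int max_frame, u8 direction);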
rx_ring->next_to_clean = 0;
rx_ring->next_to_use = 0;
-
- if (rx_ring->head)
- writel(0, adapter->hw.hw_addr + rx_ring->head);
- if (rx_ring->tail)
- writel(0, adapter->hw.hw_addr + rx_ring->tail);
}
/**
tx_ring->next_to_use = 0;
tx_ring->next_to_clean = 0;
-
- if (tx_ring->head)
- writel(0, adapter->hw.hw_addr + tx_ring->head);
- if (tx_ring->tail)
- writel(0, adapter->hw.hw_addr + tx_ring->tail);
}
/**
int j;
struct tc_configuration *tc;
#endif
+ int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN;
/* PCI config space info */
#ifdef CONFIG_DCB
adapter->last_lfc_mode = hw->fc.current_mode;
#endif
- hw->fc.high_water = IXGBE_DEFAULT_FCRTH;
- hw->fc.low_water = IXGBE_DEFAULT_FCRTL;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE;
hw->fc.send_xon = true;
hw->fc.disable_fc_autoneg = false;
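
Instead of the fixed FCRTH/FCRTL defaults, the pause-frame watermarks now scale with the largest frame the ring can carry, and ixgbe_change_mtu() below recomputes them on every MTU change. A worked example of the input they key off (ETH_HLEN and ETH_FCS_LEN are the standard 14 and 4 bytes):

	/* Worked example for a stock 1500-byte MTU:
	 *	max_frame = mtu + ETH_HLEN + ETH_FCS_LEN
	 *	          = 1500 + 14 + 4 = 1518 bytes
	 * FC_HIGH_WATER()/FC_LOW_WATER() (definitions not in this
	 * excerpt) derive the XOFF/XON thresholds from this so pause
	 * frames go out while enough headroom remains to absorb
	 * frames already in flight.
	 */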
static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
{
struct ixgbe_adapter *adapter = netdev_priv(netdev);
+ struct ixgbe_hw *hw = &adapter->hw;
int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
/* MTU < 68 is an error and causes problems on some kernels */
/* must set new MTU before calling down or up */
netdev->mtu = new_mtu;
+ hw->fc.high_water = FC_HIGH_WATER(max_frame);
+ hw->fc.low_water = FC_LOW_WATER(max_frame);
+
if (netif_running(netdev))
ixgbe_reinit_locked(adapter);
static int ixgbe_tx_map(struct ixgbe_adapter *adapter,
struct ixgbe_ring *tx_ring,
struct sk_buff *skb, u32 tx_flags,
- unsigned int first)
+ unsigned int first, const u8 hdr_len)
{
struct pci_dev *pdev = adapter->pdev;
struct ixgbe_tx_buffer *tx_buffer_info;
unsigned int offset = 0, size, count = 0, i;
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
unsigned int f;
+ unsigned int bytecount = skb->len;
+ u16 gso_segs = 1;
i = tx_ring->next_to_use;
break;
}
+ if (tx_flags & IXGBE_TX_FLAGS_TSO)
+ gso_segs = skb_shinfo(skb)->gso_segs;
+#ifdef IXGBE_FCOE
+ /* adjust for FCoE Sequence Offload */
+ else if (tx_flags & IXGBE_TX_FLAGS_FSO)
+ gso_segs = DIV_ROUND_UP(skb->len - hdr_len,
+ skb_shinfo(skb)->gso_size);
+#endif /* IXGBE_FCOE */
+ /* multiply data chunks by size of headers */
+ bytecount += (gso_segs - 1) * hdr_len;
+
+ tx_ring->tx_buffer_info[i].bytecount = bytecount;
+ tx_ring->tx_buffer_info[i].gso_segs = gso_segs;
tx_ring->tx_buffer_info[i].skb = skb;
tx_ring->tx_buffer_info[first].next_to_watch = i;
return 0;
}
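
The bytecount adjustment reflects what actually hits the wire: skb->len counts the header once, but every one of the gso_segs wire frames carries its own copy of the hdr_len header bytes. A worked example with assumed TSO parameters:

	/* Assumed example: skb->len = 14654, hdr_len = 66, gso_size = 1448.
	 *	payload   = 14654 - 66                = 14588
	 *	gso_segs  = DIV_ROUND_UP(14588, 1448) = 11
	 *	bytecount = 14654 + (11 - 1) * 66     = 15314
	 * Cross-check: 11 frames * 66 header bytes + 14588 payload = 15314,
	 * so the stats counters see exactly the on-wire byte count.
	 */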
-static void ixgbe_tx_queue(struct ixgbe_adapter *adapter,
- struct ixgbe_ring *tx_ring,
+static void ixgbe_tx_queue(struct ixgbe_ring *tx_ring,
int tx_flags, int count, u32 paylen, u8 hdr_len)
{
union ixgbe_adv_tx_desc *tx_desc = NULL;
wmb();
tx_ring->next_to_use = i;
- writel(i, adapter->hw.hw_addr + tx_ring->tail);
+ writel(i, tx_ring->tail);
}
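
The Tx doorbell follows the same discipline as the Rx side: order all descriptor writes before the tail write, then hit the premapped tail address. A minimal sketch of the shared pattern, with ring_doorbell() being a hypothetical helper name, not driver code:

	/* Hypothetical helper distilling the pattern both paths now use: */
	static inline void ring_doorbell(struct ixgbe_ring *ring, u32 next_to_use)
	{
		/* descriptor writes must be visible before the doorbell,
		 * or the NIC may DMA a stale descriptor */
		wmb();
		ring->next_to_use = next_to_use;
		writel(next_to_use, ring->tail);
	}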
static void ixgbe_atr(struct ixgbe_adapter *adapter, struct sk_buff *skb,
tx_flags |= IXGBE_TX_FLAGS_CSUM;
}
- count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first);
+ count = ixgbe_tx_map(adapter, tx_ring, skb, tx_flags, first, hdr_len);
if (count) {
/* add the ATR filter if ATR is on */
if (tx_ring->atr_sample_rate) {
txq = netdev_get_tx_queue(netdev, tx_ring->queue_index);
txq->tx_bytes += skb->len;
txq->tx_packets++;
- ixgbe_tx_queue(adapter, tx_ring, tx_flags, count, skb->len,
- hdr_len);
+ ixgbe_tx_queue(tx_ring, tx_flags, count, skb->len, hdr_len);
ixgbe_maybe_stop_tx(netdev, tx_ring, DESC_NEEDED);
} else {