#define TG3_DEF_RX_RING_PENDING 200
#define TG3_RX_JUMBO_RING_SIZE 256
#define TG3_DEF_RX_JUMBO_RING_PENDING 100
+#define TG3_RSS_INDIR_TBL_SIZE 128
/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al. operations are done with the pure constant versions.
 */
static void tg3_disable_ints(struct tg3 *tp)
{
+ int i;
+
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
- tw32_mailbox_f(tp->napi[0].int_mbox, 0x00000001);
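+ /* Writing 0x1 to a vector's interrupt mailbox masks further
+ * interrupts from that vector; all irq_max mailboxes are masked,
+ * not just the active ones.
+ */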
+ for (i = 0; i < tp->irq_max; i++)
+ tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}
static void tg3_enable_ints(struct tg3 *tp)
{
- u32 coal_now;
- struct tg3_napi *tnapi = &tp->napi[0];
+ int i;
+ u32 coal_now = 0;
+
tp->irq_sync = 0;
wmb();
tw32(TG3PCI_MISC_HOST_CTRL,
(tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
- tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
- if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+
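+ /* Unmask each active vector by writing back its last tag; under
+ * one-shot MSI the mailbox write is repeated, matching the
+ * original single-vector code.
+ */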
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
+ if (tp->tg3_flags2 & TG3_FLG2_1SHOT_MSI)
+ tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
- coal_now = tnapi->coal_now;
+ coal_now |= tnapi->coal_now;
+ }
/* Force an initial interrupt */
if (!(tp->tg3_flags & TG3_FLAG_TAGGED_STATUS) &&
- (tnapi->hw_status->status & SD_STATUS_UPDATED))
+ (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
else
tw32(HOSTCC_MODE, tp->coalesce_mode |
- HOSTCC_MODE_ENABLE | tnapi->coal_now);
+ HOSTCC_MODE_ENABLE | coal_now);
}
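+/* Enable or disable NAPI polling on every vector. Disable walks
+ * the vectors in reverse of the order they were enabled.
+ */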
+static void tg3_napi_disable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = tp->irq_cnt - 1; i >= 0; i--)
+ napi_disable(&tp->napi[i].napi);
+}
+
+static void tg3_napi_enable(struct tg3 *tp)
+{
+ int i;
+
+ for (i = 0; i < tp->irq_cnt; i++)
+ napi_enable(&tp->napi[i].napi);
+}
+
static inline void tg3_netif_stop(struct tg3 *tp)
{
tp->dev->trans_start = jiffies; /* prevent tx timeout */
- napi_disable(&tp->napi[0].napi);
+ tg3_napi_disable(tp);
netif_tx_disable(tp->dev);
}
static inline void tg3_netif_start(struct tg3 *tp)
{
- struct tg3_napi *tnapi = &tp->napi[0];
- netif_wake_queue(tp->dev);
- /* NOTE: unconditional netif_wake_queue is only appropriate
- * so long as all callers are assured to have free tx slots
- * (such as after tg3_init_hw)
+ /* NOTE: unconditional netif_tx_wake_all_queues is only
+ * appropriate so long as all callers are assured to
+ * have free tx slots (such as after tg3_init_hw)
*/
- napi_enable(&tnapi->napi);
- tnapi->hw_status->status |= SD_STATUS_UPDATED;
+ netif_tx_wake_all_queues(tp->dev);
+
+ tg3_napi_enable(tp);
+ tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
tg3_enable_ints(tp);
}
struct tg3 *tp = tnapi->tp;
u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
u32 sw_idx = tnapi->tx_cons;
+ struct netdev_queue *txq;
+ int index = tnapi - tp->napi;
+
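+ /* With MSI-X, tx queue i is serviced by napi[i + 1]; convert the
+ * napi index back into a tx queue index.
+ */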
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ index--;
+
+ txq = netdev_get_tx_queue(tp->dev, index);
while (sw_idx != hw_idx) {
struct tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
*/
smp_mb();
- if (unlikely(netif_queue_stopped(tp->dev) &&
+ if (unlikely(netif_tx_queue_stopped(txq) &&
(tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
- netif_tx_lock(tp->dev);
- if (netif_queue_stopped(tp->dev) &&
+ __netif_tx_lock(txq, smp_processor_id());
+ if (netif_tx_queue_stopped(txq) &&
(tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
- netif_wake_queue(tp->dev);
- netif_tx_unlock(tp->dev);
+ netif_tx_wake_queue(txq);
+ __netif_tx_unlock(txq);
}
}
tg3_full_unlock(tp);
del_timer_sync(&tp->timer);
tp->irq_sync = 0;
- napi_enable(&tp->napi[0].napi);
+ tg3_napi_enable(tp);
dev_close(tp->dev);
tg3_full_lock(tp, 0);
}
u32 len, entry, base_flags, mss;
struct skb_shared_info *sp;
dma_addr_t mapping;
- struct tg3_napi *tnapi = &tp->napi[0];
+ struct tg3_napi *tnapi;
+ struct netdev_queue *txq;
- len = skb_headlen(skb);
+ txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
+ tnapi = &tp->napi[skb_get_queue_mapping(skb)];
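+ /* With MSI-X, tx queue i maps to napi[i + 1]; napi[0] does not
+ * service a tx queue.
+ */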
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ tnapi++;
/* We are running in BH disabled context with netif_tx_lock
* and TX reclaim runs via tp->napi.poll inside of a software
* interrupt.  Furthermore, IRQ processing runs lockless so we have
* no IRQ context deadlocks to worry about either.  Rejoice!
*/
if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
- if (!netif_queue_stopped(dev)) {
- netif_stop_queue(dev);
+ if (!netif_tx_queue_stopped(txq)) {
+ netif_tx_stop_queue(txq);
/* This is a hard error, log it. */
printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
"queue awake!\n", dev->name);
tnapi->tx_buffers[entry].skb = skb;
+ len = skb_headlen(skb);
+
tg3_set_txd(tnapi, entry, mapping, len, base_flags,
(skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
tnapi->tx_prod = entry;
if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
- netif_stop_queue(dev);
+ netif_tx_stop_queue(txq);
if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
- netif_wake_queue(tp->dev);
+ netif_tx_wake_queue(txq);
}
out_unlock:
*/
static void tg3_free_rings(struct tg3 *tp)
{
- struct tg3_napi *tnapi = &tp->napi[0];
- int i;
+ int i, j;
- for (i = 0; i < TG3_TX_RING_SIZE; ) {
- struct tx_ring_info *txp;
- struct sk_buff *skb;
+ for (j = 0; j < tp->irq_cnt; j++) {
+ struct tg3_napi *tnapi = &tp->napi[j];
- txp = &tnapi->tx_buffers[i];
- skb = txp->skb;
+ for (i = 0; i < TG3_TX_RING_SIZE; ) {
+ struct tx_ring_info *txp;
+ struct sk_buff *skb;
- if (skb == NULL) {
- i++;
- continue;
- }
+ txp = &tnapi->tx_buffers[i];
+ skb = txp->skb;
- skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
+ if (skb == NULL) {
+ i++;
+ continue;
+ }
+
+ skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
- txp->skb = NULL;
+ txp->skb = NULL;
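+ /* The skb and its fragments occupy nr_frags + 1 descriptors. */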
- i += skb_shinfo(skb)->nr_frags + 1;
+ i += skb_shinfo(skb)->nr_frags + 1;
- dev_kfree_skb_any(skb);
+ dev_kfree_skb_any(skb);
+ }
}
tg3_rx_prodring_free(tp, &tp->prodring[0]);
*/
static int tg3_init_rings(struct tg3 *tp)
{
- struct tg3_napi *tnapi = &tp->napi[0];
+ int i;
/* Free up all the SKBs. */
tg3_free_rings(tp);
- /* Zero out all descriptors. */
- memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
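+ /* Re-initialize the status block, tags, and rings of every vector. */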
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
- tnapi->rx_rcb_ptr = 0;
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ tnapi->tx_prod = 0;
+ tnapi->tx_cons = 0;
+ memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
+
+ tnapi->rx_rcb_ptr = 0;
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ }
return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
}
*/
static void tg3_free_consistent(struct tg3 *tp)
{
- struct tg3_napi *tnapi = &tp->napi[0];
+ int i;
- kfree(tnapi->tx_buffers);
- tnapi->tx_buffers = NULL;
- if (tnapi->tx_ring) {
- pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
- tnapi->tx_ring, tnapi->tx_desc_mapping);
- tnapi->tx_ring = NULL;
- }
- if (tnapi->rx_rcb) {
- pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
- tnapi->rx_rcb, tnapi->rx_rcb_mapping);
- tnapi->rx_rcb = NULL;
- }
- if (tnapi->hw_status) {
- pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
- tnapi->hw_status,
- tnapi->status_mapping);
- tnapi->hw_status = NULL;
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+
+ if (tnapi->tx_ring) {
+ pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
+ tnapi->tx_ring, tnapi->tx_desc_mapping);
+ tnapi->tx_ring = NULL;
+ }
+
+ kfree(tnapi->tx_buffers);
+ tnapi->tx_buffers = NULL;
+
+ if (tnapi->rx_rcb) {
+ pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
+ tnapi->rx_rcb,
+ tnapi->rx_rcb_mapping);
+ tnapi->rx_rcb = NULL;
+ }
+
+ if (tnapi->hw_status) {
+ pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
+ tnapi->hw_status,
+ tnapi->status_mapping);
+ tnapi->hw_status = NULL;
+ }
}
+
if (tp->hw_stats) {
pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
tp->hw_stats, tp->stats_mapping);
tp->hw_stats = NULL;
}
+
tg3_rx_prodring_fini(tp, &tp->prodring[0]);
}
*/
static int tg3_alloc_consistent(struct tg3 *tp)
{
- struct tg3_napi *tnapi = &tp->napi[0];
+ int i;
if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
return -ENOMEM;
- tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
- TG3_TX_RING_SIZE, GFP_KERNEL);
- if (!tnapi->tx_buffers)
+ tp->hw_stats = pci_alloc_consistent(tp->pdev,
+ sizeof(struct tg3_hw_stats),
+ &tp->stats_mapping);
+ if (!tp->hw_stats)
goto err_out;
- tnapi->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
- &tnapi->tx_desc_mapping);
- if (!tnapi->tx_ring)
- goto err_out;
+ memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
- tnapi->hw_status = pci_alloc_consistent(tp->pdev,
- TG3_HW_STATUS_SIZE,
- &tnapi->status_mapping);
- if (!tnapi->hw_status)
- goto err_out;
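+ /* Allocate a status block, rx return ring, tx buffer array, and
+ * tx ring for each interrupt vector.
+ */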
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
- memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ tnapi->hw_status = pci_alloc_consistent(tp->pdev,
+ TG3_HW_STATUS_SIZE,
+ &tnapi->status_mapping);
+ if (!tnapi->hw_status)
+ goto err_out;
- tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
- TG3_RX_RCB_RING_BYTES(tp),
- &tnapi->rx_rcb_mapping);
- if (!tnapi->rx_rcb)
- goto err_out;
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
- memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+ tnapi->rx_rcb = pci_alloc_consistent(tp->pdev,
+ TG3_RX_RCB_RING_BYTES(tp),
+ &tnapi->rx_rcb_mapping);
+ if (!tnapi->rx_rcb)
+ goto err_out;
- tp->hw_stats = pci_alloc_consistent(tp->pdev,
- sizeof(struct tg3_hw_stats),
- &tp->stats_mapping);
- if (!tp->hw_stats)
- goto err_out;
+ memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
- memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
+ tnapi->tx_buffers = kzalloc(sizeof(struct tx_ring_info) *
+ TG3_TX_RING_SIZE, GFP_KERNEL);
+ if (!tnapi->tx_buffers)
+ goto err_out;
+
+ tnapi->tx_ring = pci_alloc_consistent(tp->pdev,
+ TG3_TX_RING_BYTES,
+ &tnapi->tx_desc_mapping);
+ if (!tnapi->tx_ring)
+ goto err_out;
+ }
return 0;
static int tg3_abort_hw(struct tg3 *tp, int silent)
{
int i, err;
- struct tg3_napi *tnapi = &tp->napi[0];
tg3_disable_ints(tp);
err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
- if (tnapi->hw_status)
- memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status)
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+ }
if (tp->hw_stats)
memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
* sharing or irqpoll.
*/
tp->tg3_flags |= TG3_FLAG_CHIP_RESETTING;
- if (tp->napi[0].hw_status) {
- tp->napi[0].hw_status->status = 0;
- tp->napi[0].hw_status->status_tag = 0;
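+ /* Clear every vector's status block and tags so no stale events
+ * are processed after the reset.
+ */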
+ for (i = 0; i < tp->irq_cnt; i++) {
+ struct tg3_napi *tnapi = &tp->napi[i];
+ if (tnapi->hw_status) {
+ tnapi->hw_status->status = 0;
+ tnapi->hw_status->status_tag = 0;
+ }
+ tnapi->last_tag = 0;
+ tnapi->last_irq_tag = 0;
}
- tp->napi[0].last_tag = 0;
- tp->napi[0].last_irq_tag = 0;
smp_mb();
for (i = 0; i < tp->irq_cnt; i++)
static void __tg3_set_rx_mode(struct net_device *);
static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
{
- tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
- tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
- tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
- tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
- if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
- tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
- tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+ int i;
+
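+ /* With MSI-X, the default (vector 0) coalescing registers are
+ * zeroed; traffic is coalesced via the per-vector registers
+ * programmed below.
+ */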
+ if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
+ tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
+ tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+
+ tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
+ tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
+ } else {
+ tw32(HOSTCC_TXCOL_TICKS, 0);
+ tw32(HOSTCC_TXMAX_FRAMES, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
+
+ tw32(HOSTCC_RXCOL_TICKS, 0);
+ tw32(HOSTCC_RXMAX_FRAMES, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
}
- tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
- tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
+
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
u32 val = ec->stats_block_coalesce_usecs;
+ tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
+ tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
+
if (!netif_carrier_ok(tp->dev))
val = 0;
tw32(HOSTCC_STAT_COAL_TICKS, val);
}
+
+ for (i = 0; i < tp->irq_cnt - 1; i++) {
+ u32 reg;
+
+ reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_coalesce_usecs);
+ reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_coalesce_usecs);
+ reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames);
+ reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames);
+ reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->rx_max_coalesced_frames_irq);
+ reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
+ tw32(reg, ec->tx_max_coalesced_frames_irq);
+ }
+
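+ /* Zero the coalescing registers of any remaining unused vectors. */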
+ for (; i < tp->irq_max - 1; i++) {
+ tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
+ }
}
/* tp->lock is held. */
static void tg3_rings_reset(struct tg3 *tp)
{
int i;
- u32 txrcb, rxrcb, limit;
+ u32 stblk, txrcb, rxrcb, limit;
struct tg3_napi *tnapi = &tp->napi[0];
/* Disable all transmit rings but the first. */

/* Disable interrupts */
tw32_mailbox_f(tp->napi[0].int_mbox, 1);
/* Zero mailbox registers. */
- tp->napi[0].tx_prod = 0;
- tp->napi[0].tx_cons = 0;
- tw32_mailbox(tp->napi[0].prodmbox, 0);
- tw32_rx_mbox(tp->napi[0].consmbox, 0);
+ if (tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX) {
+ for (i = 1; i < TG3_IRQ_MAX_VECS; i++) {
+ tp->napi[i].tx_prod = 0;
+ tp->napi[i].tx_cons = 0;
+ tw32_mailbox(tp->napi[i].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[i].consmbox, 0);
+ tw32_mailbox_f(tp->napi[i].int_mbox, 1);
+ }
+ } else {
+ tp->napi[0].tx_prod = 0;
+ tp->napi[0].tx_cons = 0;
+ tw32_mailbox(tp->napi[0].prodmbox, 0);
+ tw32_rx_mbox(tp->napi[0].consmbox, 0);
+ }
/* Make sure the NIC-based send BD rings are disabled. */
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {

/* Set status block DMA address */
tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
((u64) tnapi->status_mapping & 0xffffffff));
- tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
- (TG3_TX_RING_SIZE <<
- BDINFO_FLAGS_MAXLEN_SHIFT),
- NIC_SRAM_TX_BUFFER_DESC);
+ if (tnapi->tx_ring) {
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+ txrcb += TG3_BDINFO_SIZE;
+ }
+
+ if (tnapi->rx_rcb) {
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ (TG3_RX_RCB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+ rxrcb += TG3_BDINFO_SIZE;
+ }
+
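+ /* Status block addresses for vectors 1 and up are programmed
+ * starting at HOSTCC_STATBLCK_RING1, 8 bytes per vector.
+ */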
+ stblk = HOSTCC_STATBLCK_RING1;
- tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
- (TG3_RX_RCB_RING_SIZE(tp) <<
- BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+ for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
+ u64 mapping = (u64)tnapi->status_mapping;
+ tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
+ tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
+
+ /* Clear status block in ram. */
+ memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
+
+ tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
+ (TG3_TX_RING_SIZE <<
+ BDINFO_FLAGS_MAXLEN_SHIFT),
+ NIC_SRAM_TX_BUFFER_DESC);
+
+ tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
+ (TG3_RX_RCB_RING_SIZE(tp) <<
+ BDINFO_FLAGS_MAXLEN_SHIFT), 0);
+
+ stblk += 8;
+ txrcb += TG3_BDINFO_SIZE;
+ rxrcb += TG3_BDINFO_SIZE;
+ }
}
/* tp->lock is held. */
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
udelay(100);
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX) {
+ val = tr32(MSGINT_MODE);
+ val |= MSGINT_MODE_MULTIVEC_EN | MSGINT_MODE_ENABLE;
+ tw32(MSGINT_MODE, val);
+ }
+
if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
udelay(40);
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
- tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
+ val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ val |= SNDBDI_MODE_MULTI_TXQ_EN;
+ tw32(SNDBDI_MODE, val);
tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {

tw32_f(MAC_TX_MODE, tp->tx_mode);
udelay(100);
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) {
+ u32 reg = MAC_RSS_INDIR_TBL_0;
+ u8 *ent = (u8 *)&val;
+
+ /* Setup the indirection table */
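+ /* Each 32-bit register packs four one-byte entries; entry i
+ * steers hash bucket i to one of the irq_cnt - 1 rx return rings.
+ */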
+ for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
+ int idx = i % sizeof(val);
+
+ ent[idx] = i % (tp->irq_cnt - 1);
+ if (idx == sizeof(val) - 1) {
+ tw32(reg, val);
+ reg += 4;
+ }
+ }
+
+ /* Setup the "secret" hash key. */
+ tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
+ tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
+ tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
+ tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
+ tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
+ tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
+ tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
+ tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
+ tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
+ tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
+ }
+
tp->rx_mode = RX_MODE_ENABLE;
if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
+ if (tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS)
+ tp->rx_mode |= RX_MODE_RSS_ENABLE |
+ RX_MODE_RSS_ITBL_HASH_BITS_7 |
+ RX_MODE_RSS_IPV6_HASH_EN |
+ RX_MODE_RSS_TCP_IPV6_HASH_EN |
+ RX_MODE_RSS_IPV4_HASH_EN |
+ RX_MODE_RSS_TCP_IPV4_HASH_EN;
+
tw32_f(MAC_RX_MODE, tp->rx_mode);
udelay(10);
tp->irq_cnt = rc;
}
+ tp->tg3_flags3 |= TG3_FLG3_ENABLE_RSS;
+
for (i = 0; i < tp->irq_max; i++)
tp->napi[i].irq_vec = msix_ent[i].vector;
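+ /* One vector is reserved for link and error interrupts, leaving
+ * irq_cnt - 1 queues for tx traffic.
+ */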
+ tp->dev->real_num_tx_queues = tp->irq_cnt - 1;
+
return true;
}
if (tp->tg3_flags2 & TG3_FLG2_USING_MSI_OR_MSIX) {
u32 msi_mode = tr32(MSGINT_MODE);
+ if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
+ msi_mode |= MSGINT_MODE_MULTIVEC_EN;
tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
}
defcfg:
if (!(tp->tg3_flags2 & TG3_FLG2_USING_MSIX)) {
tp->irq_cnt = 1;
tp->napi[0].irq_vec = tp->pdev->irq;
+ tp->dev->real_num_tx_queues = 1;
}
}
else if (tp->tg3_flags2 & TG3_FLG2_USING_MSI)
pci_disable_msi(tp->pdev);
tp->tg3_flags2 &= ~TG3_FLG2_USING_MSI_OR_MSIX;
+ tp->tg3_flags3 &= ~TG3_FLG3_ENABLE_RSS;
}
static int tg3_open(struct net_device *dev)
if (err)
goto err_out1;
- napi_enable(&tp->napi[0].napi);
+ tg3_napi_enable(tp);
for (i = 0; i < tp->irq_cnt; i++) {
struct tg3_napi *tnapi = &tp->napi[i];
tg3_full_unlock(tp);
- netif_start_queue(dev);
+ netif_tx_start_all_queues(dev);
return 0;
}
err_out2:
- napi_disable(&tp->napi[0].napi);
+ tg3_napi_disable(tp);
tg3_free_consistent(tp);
err_out1:
int i;
struct tg3 *tp = netdev_priv(dev);
- napi_disable(&tp->napi[0].napi);
+ tg3_napi_disable(tp);
cancel_work_sync(&tp->reset_task);
- netif_stop_queue(dev);
+ netif_tx_stop_all_queues(dev);
del_timer_sync(&tp->timer);
goto err_out_free_res;
}
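+ /* Allocate the netdev with the maximum tx queue count;
+ * real_num_tx_queues is trimmed once the vector count is known.
+ */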
- dev = alloc_etherdev(sizeof(*tp));
+ dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
if (!dev) {
printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
err = -ENOMEM;