tg3: Update version to 3.104
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 2d58406deb00573aed56f0feeea705da1d780961..6e6db955b4a97271c1ef6a6dc277202b1e2df54b 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
 
 #define DRV_MODULE_NAME                "tg3"
 #define PFX DRV_MODULE_NAME    ": "
-#define DRV_MODULE_VERSION     "3.103"
-#define DRV_MODULE_RELDATE     "November 2, 2009"
+#define DRV_MODULE_VERSION     "3.104"
+#define DRV_MODULE_RELDATE     "November 13, 2009"
 
 #define TG3_DEF_MAC_MODE       0
 #define TG3_DEF_RX_MODE                0
 #define TG3_RX_STD_MAP_SZ              TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
 #define TG3_RX_JMB_MAP_SZ              TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
 
+#define TG3_RX_STD_BUFF_RING_SIZE \
+       (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
+
+#define TG3_RX_JMB_BUFF_RING_SIZE \
+       (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
+
 /* minimum number of free TX descriptors required to wake up TX process */
 #define TG3_TX_WAKEUP_THRESH(tnapi)            ((tnapi)->tx_pending / 4)
 
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
+       {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
-       if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) {
+       if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
@@ -4397,6 +4406,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
        }
 }
 
+static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
+{
+       if (!ri->skb)
+               return;
+
+       pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
+                        map_sz, PCI_DMA_FROMDEVICE);
+       dev_kfree_skb_any(ri->skb);
+       ri->skb = NULL;
+}
+
 /* Returns size of skb allocated or < 0 on error.
  *
  * We only need to fill in the address because the other members
@@ -4408,16 +4428,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
  * buffers the cpu only reads the last cacheline of the RX descriptor
  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
  */
-static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
-                           int src_idx, u32 dest_idx_unmasked)
+static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
+                           u32 opaque_key, u32 dest_idx_unmasked)
 {
-       struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *desc;
        struct ring_info *map, *src_map;
        struct sk_buff *skb;
        dma_addr_t mapping;
        int skb_size, dest_idx;
-       struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
 
        src_map = NULL;
        switch (opaque_key) {
@@ -4425,8 +4443,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
                desc = &tpr->rx_std[dest_idx];
                map = &tpr->rx_std_buffers[dest_idx];
-               if (src_idx >= 0)
-                       src_map = &tpr->rx_std_buffers[src_idx];
                skb_size = tp->rx_pkt_map_sz;
                break;
 
@@ -4434,8 +4450,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
                desc = &tpr->rx_jmb[dest_idx].std;
                map = &tpr->rx_jmb_buffers[dest_idx];
-               if (src_idx >= 0)
-                       src_map = &tpr->rx_jmb_buffers[src_idx];
                skb_size = TG3_RX_JMB_MAP_SZ;
                break;
 
@@ -4465,9 +4479,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
        map->skb = skb;
        pci_unmap_addr_set(map, mapping, mapping);
 
-       if (src_map != NULL)
-               src_map->skb = NULL;
-
        desc->addr_hi = ((u64)mapping >> 32);
        desc->addr_lo = ((u64)mapping & 0xffffffff);
 
@@ -4478,30 +4489,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
  * members of the RX descriptor are invariant.  See notes above
  * tg3_alloc_rx_skb for full details.
  */
-static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
-                          int src_idx, u32 dest_idx_unmasked)
+static void tg3_recycle_rx(struct tg3_napi *tnapi,
+                          struct tg3_rx_prodring_set *dpr,
+                          u32 opaque_key, int src_idx,
+                          u32 dest_idx_unmasked)
 {
        struct tg3 *tp = tnapi->tp;
        struct tg3_rx_buffer_desc *src_desc, *dest_desc;
        struct ring_info *src_map, *dest_map;
        int dest_idx;
-       struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+       struct tg3_rx_prodring_set *spr = &tp->prodring[0];
 
        switch (opaque_key) {
        case RXD_OPAQUE_RING_STD:
                dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
-               dest_desc = &tpr->rx_std[dest_idx];
-               dest_map = &tpr->rx_std_buffers[dest_idx];
-               src_desc = &tpr->rx_std[src_idx];
-               src_map = &tpr->rx_std_buffers[src_idx];
+               dest_desc = &dpr->rx_std[dest_idx];
+               dest_map = &dpr->rx_std_buffers[dest_idx];
+               src_desc = &spr->rx_std[src_idx];
+               src_map = &spr->rx_std_buffers[src_idx];
                break;
 
        case RXD_OPAQUE_RING_JUMBO:
                dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
-               dest_desc = &tpr->rx_jmb[dest_idx].std;
-               dest_map = &tpr->rx_jmb_buffers[dest_idx];
-               src_desc = &tpr->rx_jmb[src_idx].std;
-               src_map = &tpr->rx_jmb_buffers[src_idx];
+               dest_desc = &dpr->rx_jmb[dest_idx].std;
+               dest_map = &dpr->rx_jmb_buffers[dest_idx];
+               src_desc = &spr->rx_jmb[src_idx].std;
+               src_map = &spr->rx_jmb_buffers[src_idx];
                break;
 
        default:
@@ -4513,7 +4526,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
                           pci_unmap_addr(src_map, mapping));
        dest_desc->addr_hi = src_desc->addr_hi;
        dest_desc->addr_lo = src_desc->addr_lo;
-
        src_map->skb = NULL;
 }
 
@@ -4545,10 +4557,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
 {
        struct tg3 *tp = tnapi->tp;
        u32 work_mask, rx_std_posted = 0;
+       u32 std_prod_idx, jmb_prod_idx;
        u32 sw_idx = tnapi->rx_rcb_ptr;
        u16 hw_idx;
        int received;
-       struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
+       struct tg3_rx_prodring_set *tpr = tnapi->prodring;
 
        hw_idx = *(tnapi->rx_rcb_prod_idx);
        /*
@@ -4558,7 +4571,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
        rmb();
        work_mask = 0;
        received = 0;
+       std_prod_idx = tpr->rx_std_prod_idx;
+       jmb_prod_idx = tpr->rx_jmb_prod_idx;
        while (sw_idx != hw_idx && budget > 0) {
+               struct ring_info *ri;
                struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
                unsigned int len;
                struct sk_buff *skb;
@@ -4568,16 +4584,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
                opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
                if (opaque_key == RXD_OPAQUE_RING_STD) {
-                       struct ring_info *ri = &tpr->rx_std_buffers[desc_idx];
+                       ri = &tp->prodring[0].rx_std_buffers[desc_idx];
                        dma_addr = pci_unmap_addr(ri, mapping);
                        skb = ri->skb;
-                       post_ptr = &tpr->rx_std_ptr;
+                       post_ptr = &std_prod_idx;
                        rx_std_posted++;
                } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
-                       struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx];
+                       ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
                        dma_addr = pci_unmap_addr(ri, mapping);
                        skb = ri->skb;
-                       post_ptr = &tpr->rx_jmb_ptr;
+                       post_ptr = &jmb_prod_idx;
                } else
                        goto next_pkt_nopost;
 
@@ -4586,7 +4602,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
                    (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
                drop_it:
-                       tg3_recycle_rx(tnapi, opaque_key,
+                       tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
                drop_it_no_recycle:
                        /* Other statistics kept track of by card. */
@@ -4606,11 +4622,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                ) {
                        int skb_size;
 
-                       skb_size = tg3_alloc_rx_skb(tnapi, opaque_key,
-                                                   desc_idx, *post_ptr);
+                       skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
+                                                   *post_ptr);
                        if (skb_size < 0)
                                goto drop_it;
 
+                       ri->skb = NULL;
+
                        pci_unmap_single(tp->pdev, dma_addr, skb_size,
                                         PCI_DMA_FROMDEVICE);
 
@@ -4618,7 +4636,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
                } else {
                        struct sk_buff *copy_skb;
 
-                       tg3_recycle_rx(tnapi, opaque_key,
+                       tg3_recycle_rx(tnapi, tpr, opaque_key,
                                       desc_idx, *post_ptr);
 
                        copy_skb = netdev_alloc_skb(tp->dev,
@@ -4669,9 +4687,7 @@ next_pkt:
 
                if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
                        u32 idx = *post_ptr % TG3_RX_RING_SIZE;
-
-                       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
-                                    TG3_64BIT_REG_LOW, idx);
+                       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
                        work_mask &= ~RXD_OPAQUE_RING_STD;
                        rx_std_posted = 0;
                }
@@ -4691,33 +4707,45 @@ next_pkt_nopost:
        tw32_rx_mbox(tnapi->consmbox, sw_idx);
 
        /* Refill RX ring(s). */
-       if (work_mask & RXD_OPAQUE_RING_STD) {
-               sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE;
-               tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-                            sw_idx);
-       }
-       if (work_mask & RXD_OPAQUE_RING_JUMBO) {
-               sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE;
-               tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-                            sw_idx);
+       if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
+               if (work_mask & RXD_OPAQUE_RING_STD) {
+                       tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+                       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
+                                    tpr->rx_std_prod_idx);
+               }
+               if (work_mask & RXD_OPAQUE_RING_JUMBO) {
+                       tpr->rx_jmb_prod_idx = jmb_prod_idx %
+                                              TG3_RX_JUMBO_RING_SIZE;
+                       tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
+                                    tpr->rx_jmb_prod_idx);
+               }
+               mmiowb();
+       } else if (work_mask) {
+               /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
+                * updated before the producer indices can be updated.
+                */
+               smp_wmb();
+
+               tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
+               tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
+
+               napi_schedule(&tp->napi[1].napi);
        }
-       mmiowb();
 
        return received;
 }
 
-static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+static void tg3_poll_link(struct tg3 *tp)
 {
-       struct tg3 *tp = tnapi->tp;
-       struct tg3_hw_status *sblk = tnapi->hw_status;
-
        /* handle link change and other phy events */
        if (!(tp->tg3_flags &
              (TG3_FLAG_USE_LINKCHG_REG |
               TG3_FLAG_POLL_SERDES))) {
+               struct tg3_hw_status *sblk = tp->napi[0].hw_status;
+
                if (sblk->status & SD_STATUS_LINK_CHG) {
                        sblk->status = SD_STATUS_UPDATED |
-                               (sblk->status & ~SD_STATUS_LINK_CHG);
+                                      (sblk->status & ~SD_STATUS_LINK_CHG);
                        spin_lock(&tp->lock);
                        if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
                                tw32_f(MAC_STATUS,
@@ -4731,6 +4759,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
                        spin_unlock(&tp->lock);
                }
        }
+}
+
+static void tg3_rx_prodring_xfer(struct tg3 *tp,
+                                struct tg3_rx_prodring_set *dpr,
+                                struct tg3_rx_prodring_set *spr)
+{
+       u32 si, di, cpycnt, src_prod_idx;
+       int i;
+
+       while (1) {
+               src_prod_idx = spr->rx_std_prod_idx;
+
+               /* Make sure updates to the rx_std_buffers[] entries and the
+                * standard producer index are seen in the correct order.
+                */
+               smp_rmb();
+
+               if (spr->rx_std_cons_idx == src_prod_idx)
+                       break;
+
+               if (spr->rx_std_cons_idx < src_prod_idx)
+                       cpycnt = src_prod_idx - spr->rx_std_cons_idx;
+               else
+                       cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
+
+               cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
+
+               si = spr->rx_std_cons_idx;
+               di = dpr->rx_std_prod_idx;
+
+               memcpy(&dpr->rx_std_buffers[di],
+                      &spr->rx_std_buffers[si],
+                      cpycnt * sizeof(struct ring_info));
+
+               for (i = 0; i < cpycnt; i++, di++, si++) {
+                       struct tg3_rx_buffer_desc *sbd, *dbd;
+                       sbd = &spr->rx_std[si];
+                       dbd = &dpr->rx_std[di];
+                       dbd->addr_hi = sbd->addr_hi;
+                       dbd->addr_lo = sbd->addr_lo;
+               }
+
+               spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
+                                      TG3_RX_RING_SIZE;
+               dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
+                                      TG3_RX_RING_SIZE;
+       }
+
+       while (1) {
+               src_prod_idx = spr->rx_jmb_prod_idx;
+
+               /* Make sure updates to the rx_jmb_buffers[] entries and
+                * the jumbo producer index are seen in the correct order.
+                */
+               smp_rmb();
+
+               if (spr->rx_jmb_cons_idx == src_prod_idx)
+                       break;
+
+               if (spr->rx_jmb_cons_idx < src_prod_idx)
+                       cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
+               else
+                       cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
+
+               cpycnt = min(cpycnt,
+                            TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
+
+               si = spr->rx_jmb_cons_idx;
+               di = dpr->rx_jmb_prod_idx;
+
+               memcpy(&dpr->rx_jmb_buffers[di],
+                      &spr->rx_jmb_buffers[si],
+                      cpycnt * sizeof(struct ring_info));
+
+               for (i = 0; i < cpycnt; i++, di++, si++) {
+                       struct tg3_rx_buffer_desc *sbd, *dbd;
+                       sbd = &spr->rx_jmb[si].std;
+                       dbd = &dpr->rx_jmb[di].std;
+                       dbd->addr_hi = sbd->addr_hi;
+                       dbd->addr_lo = sbd->addr_lo;
+               }
+
+               spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
+                                      TG3_RX_JUMBO_RING_SIZE;
+               dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
+                                      TG3_RX_JUMBO_RING_SIZE;
+       }
+}
+
+static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
+{
+       struct tg3 *tp = tnapi->tp;
 
        /* run TX completion thread */
        if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
@@ -4746,6 +4866,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
        if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_done += tg3_rx(tnapi, budget - work_done);
 
+       if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
+               int i;
+               u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
+               u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
+
+               for (i = 2; i < tp->irq_cnt; i++)
+                       tg3_rx_prodring_xfer(tp, tnapi->prodring,
+                                            tp->napi[i].prodring);
+
+               wmb();
+
+               if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
+                       u32 mbox = TG3_RX_STD_PROD_IDX_REG;
+                       tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
+               }
+
+               if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
+                       u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
+                       tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
+               }
+
+               mmiowb();
+       }
+
+       return work_done;
+}
+
+static int tg3_poll_msix(struct napi_struct *napi, int budget)
+{
+       struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
+       struct tg3 *tp = tnapi->tp;
+       int work_done = 0;
+       struct tg3_hw_status *sblk = tnapi->hw_status;
+
+       while (1) {
+               work_done = tg3_poll_work(tnapi, work_done, budget);
+
+               if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
+                       goto tx_recovery;
+
+               if (unlikely(work_done >= budget))
+                       break;
+
+               /* tp->last_tag is used in tg3_restart_ints() below
+                * to tell the hw how much work has been processed,
+                * so we must read it before checking for more work.
+                */
+               tnapi->last_tag = sblk->status_tag;
+               tnapi->last_irq_tag = tnapi->last_tag;
+               rmb();
+
+               /* check for RX/TX work to do */
+               if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
+                   *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
+                       napi_complete(napi);
+                       /* Reenable interrupts. */
+                       tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
+                       mmiowb();
+                       break;
+               }
+       }
+
+       return work_done;
+
+tx_recovery:
+       /* work_done is guaranteed to be less than budget. */
+       napi_complete(napi);
+       schedule_work(&tp->reset_task);
        return work_done;
 }
 
@@ -4757,6 +4945,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
        struct tg3_hw_status *sblk = tnapi->hw_status;
 
        while (1) {
+               tg3_poll_link(tp);
+
                work_done = tg3_poll_work(tnapi, work_done, budget);
 
                if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5653,36 +5843,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
 {
        int i;
-       struct ring_info *rxp;
 
-       for (i = 0; i < TG3_RX_RING_SIZE; i++) {
-               rxp = &tpr->rx_std_buffers[i];
-
-               if (rxp->skb == NULL)
-                       continue;
+       if (tpr != &tp->prodring[0]) {
+               for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
+                    i = (i + 1) % TG3_RX_RING_SIZE)
+                       tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                                       tp->rx_pkt_map_sz);
+
+               if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+                       for (i = tpr->rx_jmb_cons_idx;
+                            i != tpr->rx_jmb_prod_idx;
+                            i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
+                               tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                                               TG3_RX_JMB_MAP_SZ);
+                       }
+               }
 
-               pci_unmap_single(tp->pdev,
-                                pci_unmap_addr(rxp, mapping),
-                                tp->rx_pkt_map_sz,
-                                PCI_DMA_FROMDEVICE);
-               dev_kfree_skb_any(rxp->skb);
-               rxp->skb = NULL;
+               return;
        }
 
-       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-               for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
-                       rxp = &tpr->rx_jmb_buffers[i];
-
-                       if (rxp->skb == NULL)
-                               continue;
+       for (i = 0; i < TG3_RX_RING_SIZE; i++)
+               tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
+                               tp->rx_pkt_map_sz);
 
-                       pci_unmap_single(tp->pdev,
-                                        pci_unmap_addr(rxp, mapping),
-                                        TG3_RX_JMB_MAP_SZ,
-                                        PCI_DMA_FROMDEVICE);
-                       dev_kfree_skb_any(rxp->skb);
-                       rxp->skb = NULL;
-               }
+       if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
+               for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
+                       tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
+                                       TG3_RX_JMB_MAP_SZ);
        }
 }
 
@@ -5697,7 +5884,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
                                 struct tg3_rx_prodring_set *tpr)
 {
        u32 i, rx_pkt_dma_sz;
-       struct tg3_napi *tnapi = &tp->napi[0];
+
+       tpr->rx_std_cons_idx = 0;
+       tpr->rx_std_prod_idx = 0;
+       tpr->rx_jmb_cons_idx = 0;
+       tpr->rx_jmb_prod_idx = 0;
+
+       if (tpr != &tp->prodring[0]) {
+               memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
+               if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
+                       memset(&tpr->rx_jmb_buffers[0], 0,
+                              TG3_RX_JMB_BUFF_RING_SIZE);
+               goto done;
+       }
 
        /* Zero out all descriptors. */
        memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5724,7 +5923,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
 
        /* Now allocate fresh SKBs for each rx ring. */
        for (i = 0; i < tp->rx_pending; i++) {
-               if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) {
+               if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
                        printk(KERN_WARNING PFX
                               "%s: Using a smaller RX standard ring, "
                               "only %d out of %d buffers were allocated "
@@ -5755,8 +5954,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
                }
 
                for (i = 0; i < tp->rx_jumbo_pending; i++) {
-                       if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO,
-                                            -1, i) < 0) {
+                       if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
+                                            i) < 0) {
                                printk(KERN_WARNING PFX
                                       "%s: Using a smaller RX jumbo ring, "
                                       "only %d out of %d buffers were "
@@ -5800,8 +5999,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
 static int tg3_rx_prodring_init(struct tg3 *tp,
                                struct tg3_rx_prodring_set *tpr)
 {
-       tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) *
-                                     TG3_RX_RING_SIZE, GFP_KERNEL);
+       tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
        if (!tpr->rx_std_buffers)
                return -ENOMEM;
 
@@ -5811,8 +6009,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
                goto err_out;
 
        if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
-               tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) *
-                                             TG3_RX_JUMBO_RING_SIZE,
+               tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
                                              GFP_KERNEL);
                if (!tpr->rx_jmb_buffers)
                        goto err_out;
@@ -5868,9 +6065,10 @@ static void tg3_free_rings(struct tg3 *tp)
 
                        dev_kfree_skb_any(skb);
                }
-       }
 
-       tg3_rx_prodring_free(tp, &tp->prodring[0]);
+               if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
+                       tg3_rx_prodring_free(tp, &tp->prodring[j]);
+       }
 }
 
 /* Initialize tx/rx rings for packet processing.
@@ -5904,9 +6102,13 @@ static int tg3_init_rings(struct tg3 *tp)
                tnapi->rx_rcb_ptr = 0;
                if (tnapi->rx_rcb)
                        memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
+
+               if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
+                       tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
+                       return -ENOMEM;
        }
 
-       return tg3_rx_prodring_alloc(tp, &tp->prodring[0]);
+       return 0;
 }
 
 /*
@@ -5950,7 +6152,8 @@ static void tg3_free_consistent(struct tg3 *tp)
                tp->hw_stats = NULL;
        }
 
-       tg3_rx_prodring_fini(tp, &tp->prodring[0]);
+       for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
+               tg3_rx_prodring_fini(tp, &tp->prodring[i]);
 }
 
 /*
@@ -5961,8 +6164,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
 {
        int i;
 
-       if (tg3_rx_prodring_init(tp, &tp->prodring[0]))
-               return -ENOMEM;
+       for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
+               if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
+                       goto err_out;
+       }
 
        tp->hw_stats = pci_alloc_consistent(tp->pdev,
                                            sizeof(struct tg3_hw_stats),
@@ -6006,6 +6211,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
                        break;
                }
 
+               if (tp->irq_cnt == 1)
+                       tnapi->prodring = &tp->prodring[0];
+               else if (i)
+                       tnapi->prodring = &tp->prodring[i - 1];
+
                /*
                 * If multivector RSS is enabled, vector 0 does not handle
                 * rx or tx interrupts.  Don't allocate any resources for it.
@@ -7294,9 +7504,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
        if (err)
                return err;
 
-       if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 &&
-           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) {
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+               val = tr32(TG3PCI_DMA_RW_CTRL) &
+                     ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+               tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
+       } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
+                  GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
                /* This value is determined during the probe time DMA
                 * engine test, tg3_test_dma.
                 */
@@ -7462,14 +7675,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
 
        tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
 
-       tpr->rx_std_ptr = tp->rx_pending;
-       tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
-                    tpr->rx_std_ptr);
+       tpr->rx_std_prod_idx = tp->rx_pending;
+       tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
 
-       tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
+       tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
                          tp->rx_jumbo_pending : 0;
-       tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
-                    tpr->rx_jmb_ptr);
+       tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
 
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
                tw32(STD_REPLENISH_LWM, 32);
@@ -12494,10 +12705,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
                u32 prod_id_asic_rev;
 
-               if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C ||
-                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
+               if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
+                   tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
                        pci_read_config_dword(tp->pdev,
                                              TG3PCI_GEN2_PRODID_ASICREV,
                                              &prod_id_asic_rev);
@@ -13329,6 +13539,11 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
 #endif
 #endif
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
+               val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
+               goto out;
+       }
+
        if (!goal)
                goto out;
 
@@ -13523,7 +13738,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 {
        dma_addr_t buf_dma;
        u32 *buf, saved_dma_rwctrl;
-       int ret;
+       int ret = 0;
 
        buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
        if (!buf) {
@@ -13536,6 +13751,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
 
        tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
 
+       if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
+               goto out;
+
        if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
                /* DMA read watermark not used on PCIE */
                tp->dma_rwctrl |= 0x00180000;
@@ -13608,7 +13826,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
        tg3_switch_clocks(tp);
 #endif
 
-       ret = 0;
        if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
            GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
                goto out;
@@ -14029,51 +14246,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        tp->rx_pending = TG3_DEF_RX_RING_PENDING;
        tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
 
-       intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
-       rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
-       sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
-       for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
-               struct tg3_napi *tnapi = &tp->napi[i];
-
-               tnapi->tp = tp;
-               tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
-
-               tnapi->int_mbox = intmbx;
-               if (i < 4)
-                       intmbx += 0x8;
-               else
-                       intmbx += 0x4;
-
-               tnapi->consmbox = rcvmbx;
-               tnapi->prodmbox = sndmbx;
-
-               if (i)
-                       tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
-               else
-                       tnapi->coal_now = HOSTCC_MODE_NOW;
-
-               if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
-                       break;
-
-               /*
-                * If we support MSIX, we'll be using RSS.  If we're using
-                * RSS, the first vector only handles link interrupts and the
-                * remaining vectors handle rx and tx interrupts.  Reuse the
-                * mailbox values for the next iteration.  The values we setup
-                * above are still useful for the single vectored mode.
-                */
-               if (!i)
-                       continue;
-
-               rcvmbx += 0x8;
-
-               if (sndmbx & 0x4)
-                       sndmbx -= 0x4;
-               else
-                       sndmbx += 0xc;
-       }
-
-       netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
        dev->ethtool_ops = &tg3_ethtool_ops;
        dev->watchdog_timeo = TG3_TX_TIMEOUT;
        dev->irq = pdev->irq;
@@ -14216,6 +14388,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
        tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
        tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
 
+       intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
+       rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
+       sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
+       for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
+               struct tg3_napi *tnapi = &tp->napi[i];
+
+               tnapi->tp = tp;
+               tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
+
+               tnapi->int_mbox = intmbx;
+               if (i < 4)
+                       intmbx += 0x8;
+               else
+                       intmbx += 0x4;
+
+               tnapi->consmbox = rcvmbx;
+               tnapi->prodmbox = sndmbx;
+
+               if (i) {
+                       tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
+                       netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
+               } else {
+                       tnapi->coal_now = HOSTCC_MODE_NOW;
+                       netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
+               }
+
+               if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
+                       break;
+
+               /*
+                * If we support MSIX, we'll be using RSS.  If we're using
+                * RSS, the first vector only handles link interrupts and the
+                * remaining vectors handle rx and tx interrupts.  Reuse the
+                * mailbox values for the next iteration.  The values we setup
+                * above are still useful for the single vectored mode.
+                */
+               if (!i)
+                       continue;
+
+               rcvmbx += 0x8;
+
+               if (sndmbx & 0x4)
+                       sndmbx -= 0x4;
+               else
+                       sndmbx += 0xc;
+       }
+
        tg3_init_coal(tp);
 
        pci_set_drvdata(pdev, dev);