bbs.cooldavid.org Git - net-next-2.6.git/blobdiff - drivers/net/bnx2.c
bnx2: use device model DMA API
[net-next-2.6.git] / drivers / net / bnx2.c
index 522de9f818be937ae0925fb481ab106a1f0b5d8e..ce3217b441a477336f155c9f93a7785230a1374e 100644 (file)
@@ -58,8 +58,8 @@
 #include "bnx2_fw.h"
 
 #define DRV_MODULE_NAME                "bnx2"
-#define DRV_MODULE_VERSION     "2.0.15"
-#define DRV_MODULE_RELDATE     "May 4, 2010"
+#define DRV_MODULE_VERSION     "2.0.16"
+#define DRV_MODULE_RELDATE     "July 2, 2010"
 #define FW_MIPS_FILE_06                "bnx2/bnx2-mips-06-5.0.0.j6.fw"
 #define FW_RV2P_FILE_06                "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
 #define FW_MIPS_FILE_09                "bnx2/bnx2-mips-09-5.0.0.j15.fw"
@@ -692,9 +692,9 @@ bnx2_free_tx_mem(struct bnx2 *bp)
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
 
                if (txr->tx_desc_ring) {
-                       pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
-                                           txr->tx_desc_ring,
-                                           txr->tx_desc_mapping);
+                       dma_free_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+                                         txr->tx_desc_ring,
+                                         txr->tx_desc_mapping);
                        txr->tx_desc_ring = NULL;
                }
                kfree(txr->tx_buf_ring);
@@ -714,9 +714,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
 
                for (j = 0; j < bp->rx_max_ring; j++) {
                        if (rxr->rx_desc_ring[j])
-                               pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-                                                   rxr->rx_desc_ring[j],
-                                                   rxr->rx_desc_mapping[j]);
+                               dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+                                                 rxr->rx_desc_ring[j],
+                                                 rxr->rx_desc_mapping[j]);
                        rxr->rx_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_buf_ring);
@@ -724,9 +724,9 @@ bnx2_free_rx_mem(struct bnx2 *bp)
 
                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        if (rxr->rx_pg_desc_ring[j])
-                               pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
-                                                   rxr->rx_pg_desc_ring[j],
-                                                   rxr->rx_pg_desc_mapping[j]);
+                               dma_free_coherent(&bp->pdev->dev, RXBD_RING_SIZE,
+                                                 rxr->rx_pg_desc_ring[j],
+                                                 rxr->rx_pg_desc_mapping[j]);
                        rxr->rx_pg_desc_ring[j] = NULL;
                }
                vfree(rxr->rx_pg_ring);
@@ -748,8 +748,8 @@ bnx2_alloc_tx_mem(struct bnx2 *bp)
                        return -ENOMEM;
 
                txr->tx_desc_ring =
-                       pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
-                                            &txr->tx_desc_mapping);
+                       dma_alloc_coherent(&bp->pdev->dev, TXBD_RING_SIZE,
+                                          &txr->tx_desc_mapping, GFP_KERNEL);
                if (txr->tx_desc_ring == NULL)
                        return -ENOMEM;
        }
@@ -776,8 +776,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
 
                for (j = 0; j < bp->rx_max_ring; j++) {
                        rxr->rx_desc_ring[j] =
-                               pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-                                                    &rxr->rx_desc_mapping[j]);
+                               dma_alloc_coherent(&bp->pdev->dev,
+                                                  RXBD_RING_SIZE,
+                                                  &rxr->rx_desc_mapping[j],
+                                                  GFP_KERNEL);
                        if (rxr->rx_desc_ring[j] == NULL)
                                return -ENOMEM;
 
@@ -795,8 +797,10 @@ bnx2_alloc_rx_mem(struct bnx2 *bp)
 
                for (j = 0; j < bp->rx_max_pg_ring; j++) {
                        rxr->rx_pg_desc_ring[j] =
-                               pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
-                                               &rxr->rx_pg_desc_mapping[j]);
+                               dma_alloc_coherent(&bp->pdev->dev,
+                                                  RXBD_RING_SIZE,
+                                                  &rxr->rx_pg_desc_mapping[j],
+                                                  GFP_KERNEL);
                        if (rxr->rx_pg_desc_ring[j] == NULL)
                                return -ENOMEM;
 
@@ -816,16 +820,16 @@ bnx2_free_mem(struct bnx2 *bp)
 
        for (i = 0; i < bp->ctx_pages; i++) {
                if (bp->ctx_blk[i]) {
-                       pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
-                                           bp->ctx_blk[i],
-                                           bp->ctx_blk_mapping[i]);
+                       dma_free_coherent(&bp->pdev->dev, BCM_PAGE_SIZE,
+                                         bp->ctx_blk[i],
+                                         bp->ctx_blk_mapping[i]);
                        bp->ctx_blk[i] = NULL;
                }
        }
        if (bnapi->status_blk.msi) {
-               pci_free_consistent(bp->pdev, bp->status_stats_size,
-                                   bnapi->status_blk.msi,
-                                   bp->status_blk_mapping);
+               dma_free_coherent(&bp->pdev->dev, bp->status_stats_size,
+                                 bnapi->status_blk.msi,
+                                 bp->status_blk_mapping);
                bnapi->status_blk.msi = NULL;
                bp->stats_blk = NULL;
        }
@@ -846,8 +850,8 @@ bnx2_alloc_mem(struct bnx2 *bp)
        bp->status_stats_size = status_blk_size +
                                sizeof(struct statistics_block);
 
-       status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
-                                         &bp->status_blk_mapping);
+       status_blk = dma_alloc_coherent(&bp->pdev->dev, bp->status_stats_size,
+                                       &bp->status_blk_mapping, GFP_KERNEL);
        if (status_blk == NULL)
                goto alloc_mem_err;
 
@@ -885,9 +889,10 @@ bnx2_alloc_mem(struct bnx2 *bp)
                if (bp->ctx_pages == 0)
                        bp->ctx_pages = 1;
                for (i = 0; i < bp->ctx_pages; i++) {
-                       bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
+                       bp->ctx_blk[i] = dma_alloc_coherent(&bp->pdev->dev,
                                                BCM_PAGE_SIZE,
-                                               &bp->ctx_blk_mapping[i]);
+                                               &bp->ctx_blk_mapping[i],
+                                               GFP_KERNEL);
                        if (bp->ctx_blk[i] == NULL)
                                goto alloc_mem_err;
                }
@@ -2664,19 +2669,19 @@ bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
 }
 
 static inline int
-bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
        dma_addr_t mapping;
        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
        struct rx_bd *rxbd =
                &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
-       struct page *page = alloc_page(GFP_ATOMIC);
+       struct page *page = alloc_page(gfp);
 
        if (!page)
                return -ENOMEM;
-       mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
+       mapping = dma_map_page(&bp->pdev->dev, page, 0, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(bp->pdev, mapping)) {
+       if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                __free_page(page);
                return -EIO;
        }
@@ -2697,15 +2702,15 @@ bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
        if (!page)
                return;
 
-       pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
-                      PCI_DMA_FROMDEVICE);
+       dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(rx_pg, mapping),
+                      PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
        __free_page(page);
        rx_pg->page = NULL;
 }
 
 static inline int
-bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
+bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index, gfp_t gfp)
 {
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
@@ -2713,7 +2718,7 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;
 
-       skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
+       skb = __netdev_alloc_skb(bp->dev, bp->rx_buf_size, gfp);
        if (skb == NULL) {
                return -ENOMEM;
        }
@@ -2721,9 +2726,9 @@ bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);
 
-       mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
-               PCI_DMA_FROMDEVICE);
-       if (pci_dma_mapping_error(bp->pdev, mapping)) {
+       mapping = dma_map_single(&bp->pdev->dev, skb->data, bp->rx_buf_use_size,
+                                PCI_DMA_FROMDEVICE);
+       if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                dev_kfree_skb(skb);
                return -EIO;
        }
@@ -2829,7 +2834,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                        }
                }
 
-               pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+               dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);
 
                tx_buf->skb = NULL;
@@ -2838,7 +2843,7 @@ bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);
 
-                       pci_unmap_page(bp->pdev,
+                       dma_unmap_page(&bp->pdev->dev,
                                dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
@@ -2945,7 +2950,7 @@ bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];
 
-       pci_dma_sync_single_for_device(bp->pdev,
+       dma_sync_single_for_device(&bp->pdev->dev,
                dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
 
@@ -2974,7 +2979,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
        int err;
        u16 prod = ring_idx & 0xffff;
 
-       err = bnx2_alloc_rx_skb(bp, rxr, prod);
+       err = bnx2_alloc_rx_skb(bp, rxr, prod, GFP_ATOMIC);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
@@ -2987,7 +2992,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
        }
 
        skb_reserve(skb, BNX2_RX_OFFSET);
-       pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
+       dma_unmap_single(&bp->pdev->dev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);
 
        if (hdr_len == 0) {
@@ -3039,7 +3044,8 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                        rx_pg->page = NULL;
 
                        err = bnx2_alloc_rx_page(bp, rxr,
-                                                RX_PG_RING_IDX(pg_prod));
+                                                RX_PG_RING_IDX(pg_prod),
+                                                GFP_ATOMIC);
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
@@ -3048,7 +3054,7 @@ bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
                                return err;
                        }
 
-                       pci_unmap_page(bp->pdev, mapping_old,
+                       dma_unmap_page(&bp->pdev->dev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);
 
                        frag_size -= frag_len;
@@ -3086,7 +3092,6 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;
-       struct pci_dev *pdev = bp->pdev;
 
        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
@@ -3112,17 +3117,15 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                skb = rx_buf->skb;
                prefetchw(skb);
 
-               if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
-                       next_rx_buf =
-                               &rxr->rx_buf_ring[
-                                       RX_RING_IDX(NEXT_RX_BD(sw_cons))];
-                       prefetch(next_rx_buf->desc);
-               }
+               next_rx_buf =
+                       &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
+               prefetch(next_rx_buf->desc);
+
                rx_buf->skb = NULL;
 
                dma_addr = dma_unmap_addr(rx_buf, mapping);
 
-               pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
+               dma_sync_single_for_cpu(&bp->pdev->dev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);
 
@@ -3222,6 +3225,10 @@ bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
+               if ((bp->dev->features & NETIF_F_RXHASH) &&
+                   ((status & L2_FHDR_STATUS_USE_RXHASH) ==
+                    L2_FHDR_STATUS_USE_RXHASH))
+                       skb->rxhash = rx_hdr->l2_fhdr_hash;
 
                skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
 
@@ -5178,7 +5185,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
-               if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
+               if (bnx2_alloc_rx_page(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
                        netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
                                    ring_num, i, bp->rx_pg_ring_size);
                        break;
@@ -5190,7 +5197,7 @@ bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
 
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
-               if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
+               if (bnx2_alloc_rx_skb(bp, rxr, ring_prod, GFP_KERNEL) < 0) {
                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
                                    ring_num, i, bp->rx_ring_size);
                        break;
@@ -5336,7 +5343,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                                continue;
                        }
 
-                       pci_unmap_single(bp->pdev,
+                       dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
                                         PCI_DMA_TODEVICE);
@@ -5347,7 +5354,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
                        j++;
                        for (k = 0; k < last; k++, j++) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
-                               pci_unmap_page(bp->pdev,
+                               dma_unmap_page(&bp->pdev->dev,
                                        dma_unmap_addr(tx_buf, mapping),
                                        skb_shinfo(skb)->frags[k].size,
                                        PCI_DMA_TODEVICE);
@@ -5377,7 +5384,7 @@ bnx2_free_rx_skbs(struct bnx2 *bp)
                        if (skb == NULL)
                                continue;
 
-                       pci_unmap_single(bp->pdev,
+                       dma_unmap_single(&bp->pdev->dev,
                                         dma_unmap_addr(rx_buf, mapping),
                                         bp->rx_buf_use_size,
                                         PCI_DMA_FROMDEVICE);
@@ -5730,9 +5737,9 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);
 
-       map = pci_map_single(bp->pdev, skb->data, pkt_size,
-               PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(bp->pdev, map)) {
+       map = dma_map_single(&bp->pdev->dev, skb->data, pkt_size,
+                            PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&bp->pdev->dev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }
@@ -5770,7 +5777,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
 
        udelay(5);
 
-       pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
+       dma_unmap_single(&bp->pdev->dev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);
 
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
@@ -5787,7 +5794,7 @@ bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
        rx_hdr = rx_buf->desc;
        skb_reserve(rx_skb, BNX2_RX_OFFSET);
 
-       pci_dma_sync_single_for_cpu(bp->pdev,
+       dma_sync_single_for_cpu(&bp->pdev->dev,
                dma_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);
 
@@ -6188,7 +6195,7 @@ bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;
 
-       if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
+       if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
                bnx2_enable_msix(bp, msix_vecs);
 
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
@@ -6312,9 +6319,14 @@ static void
 bnx2_dump_state(struct bnx2 *bp)
 {
        struct net_device *dev = bp->dev;
-       u32 mcp_p0, mcp_p1;
-
-       netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
+       u32 mcp_p0, mcp_p1, val1, val2;
+
+       pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
+       netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
+                  atomic_read(&bp->intr_sem), val1);
+       pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
+       pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
+       netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
        netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
                   REG_RD(bp, BNX2_EMAC_TX_STATUS),
                   REG_RD(bp, BNX2_EMAC_RX_STATUS));
@@ -6450,8 +6462,8 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
        } else
                mss = 0;
 
-       mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
-       if (pci_dma_mapping_error(bp->pdev, mapping)) {
+       mapping = dma_map_single(&bp->pdev->dev, skb->data, len, PCI_DMA_TODEVICE);
+       if (dma_mapping_error(&bp->pdev->dev, mapping)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }
@@ -6479,9 +6491,9 @@ bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
                txbd = &txr->tx_desc_ring[ring_prod];
 
                len = frag->size;
-               mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
-                       len, PCI_DMA_TODEVICE);
-               if (pci_dma_mapping_error(bp->pdev, mapping))
+               mapping = dma_map_page(&bp->pdev->dev, frag->page, frag->page_offset,
+                                      len, PCI_DMA_TODEVICE);
+               if (dma_mapping_error(&bp->pdev->dev, mapping))
                        goto dma_error;
                dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);
@@ -6520,7 +6532,7 @@ dma_error:
        ring_prod = TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
-       pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+       dma_unmap_single(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);
 
        /* unmap remaining mapped pages */
@@ -6528,7 +6540,7 @@ dma_error:
                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
-               pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
+               dma_unmap_page(&bp->pdev->dev, dma_unmap_addr(tx_buf, mapping),
                               skb_shinfo(skb)->frags[i].size,
                               PCI_DMA_TODEVICE);
        }
@@ -6583,36 +6595,25 @@ bnx2_save_stats(struct bnx2 *bp)
                temp_stats[i] += hw_stats[i];
 }
 
-#define GET_64BIT_NET_STATS64(ctr)                             \
-       (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
-       (unsigned long) (ctr##_lo)
-
-#define GET_64BIT_NET_STATS32(ctr)                             \
-       (ctr##_lo)
+#define GET_64BIT_NET_STATS64(ctr)             \
+       (((u64) (ctr##_hi) << 32) + (u64) (ctr##_lo))
 
-#if (BITS_PER_LONG == 64)
 #define GET_64BIT_NET_STATS(ctr)                               \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
-#else
-#define GET_64BIT_NET_STATS(ctr)                               \
-       GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
-       GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
-#endif
 
 #define GET_32BIT_NET_STATS(ctr)                               \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
 
-static struct net_device_stats *
-bnx2_get_stats(struct net_device *dev)
+static struct rtnl_link_stats64 *
+bnx2_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *net_stats)
 {
        struct bnx2 *bp = netdev_priv(dev);
-       struct net_device_stats *net_stats = &dev->stats;
 
-       if (bp->stats_blk == NULL) {
+       if (bp->stats_blk == NULL)
                return net_stats;
-       }
+
        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
@@ -7561,6 +7562,12 @@ bnx2_set_tx_csum(struct net_device *dev, u32 data)
                return (ethtool_op_set_tx_csum(dev, data));
 }
 
+static int
+bnx2_set_flags(struct net_device *dev, u32 data)
+{
+       return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
+}
+
 static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
@@ -7590,6 +7597,8 @@ static const struct ethtool_ops bnx2_ethtool_ops = {
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
+       .set_flags              = bnx2_set_flags,
+       .get_flags              = ethtool_op_get_flags,
 };
 
 /* Called with rtnl_lock */
@@ -8275,7 +8284,7 @@ static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
-       .ndo_get_stats          = bnx2_get_stats,
+       .ndo_get_stats64        = bnx2_get_stats64,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
@@ -8336,7 +8345,8 @@ bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);
 
-       dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
+       dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
+                        NETIF_F_RXHASH;
        vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_IPV6_CSUM;