/* Source: net-next-2.6.git, drivers/net/bnx2.c
 * (snapshot at commit "bnx2: Update version to 2.0.16")
 */
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
#define DRV_MODULE_NAME         "bnx2"
#define DRV_MODULE_VERSION      "2.0.16"
#define DRV_MODULE_RELDATE      "July 2, 2010"
/* Firmware image names, advertised below via MODULE_FIRMWARE.
 * The -06 images are for 5706/5708 family chips, the -09 images
 * for 5709/5716, with a separate RV2P image for 09 Ax steppings.
 */
#define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
#define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
#define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j15.fw"
#define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
#define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"

/* Convert a relative delay into an absolute jiffies deadline. */
#define RUN_AT(x) (jiffies + (x))

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_MIPS_FILE_06);
MODULE_FIRMWARE(FW_RV2P_FILE_06);
MODULE_FIRMWARE(FW_MIPS_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09);
MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);

/* Module parameter: set disable_msi=1 to force legacy INTx interrupts. */
static int disable_msi = 0;

module_param(disable_msi, int, 0);
MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board identifiers.  Used as the driver_data index in bnx2_pci_tbl
 * and as the index into board_info[] below — keep all three in sync.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* indexed by board_t, above -- entry order must match the enum. */
static struct {
	char *name;	/* human-readable board name for probe messages */
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
/* PCI ID table.  HP OEM subsystem IDs must precede the generic
 * PCI_ANY_ID entries for the same device so they match first.
 * 0x163b/0x163c are the BCM5716/5716S device IDs (no
 * PCI_DEVICE_ID_NX2_* macro available here).
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
148
/* NVRAM/flash device table, selected at init time by matching the
 * chip's strapping value.  The first five words of each entry are raw
 * hardware configuration values (strapping plus NVM config/command
 * setup -- see struct flash_spec in bnx2.h for the exact field names);
 * they are followed by access flags, page geometry, the byte-address
 * mask, total size in bytes, and a descriptive name.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* The 5709 has a fixed on-chip NVRAM interface, so a single spec is
 * used instead of strapping-based selection from flash_table[].
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);

/* Forward declarations for NAPI setup/teardown helpers defined later
 * in this file.
 */
static void bnx2_init_napi(struct bnx2 *bp);
static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         smp_mb();
257
258         /* The ring uses 256 indices for 255 entries, one of them
259          * needs to be skipped.
260          */
261         diff = txr->tx_prod - txr->tx_cons;
262         if (unlikely(diff >= TX_DESC_CNT)) {
263                 diff &= 0xffff;
264                 if (diff == TX_DESC_CNT)
265                         diff = MAX_TX_DESC_CNT;
266         }
267         return (bp->tx_ring_size - diff);
268 }
269
/* Read a device register indirectly through the PCI config register
 * window.  The window-address write and data read must not be
 * interleaved with other indirect accesses, hence indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
281
/* Write @val to a device register indirectly through the PCI config
 * register window, serialized by indirect_lock (see bnx2_reg_rd_ind).
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
290
/* Write a 32-bit word into the driver/firmware shared memory area.
 * @offset is relative to the shared memory base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
296
297 static u32
298 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299 {
300         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
301 }
302
/* Write @val into on-chip context memory at @cid_addr + @offset.
 * On the 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL
 * pair and completion is polled (up to 5 tries, 5 usecs apart); older
 * chips use the direct CTX_DATA_ADR/CTX_DATA window.  indirect_lock
 * serializes against other indirect register accesses.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to clear the write-request bit. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
326
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331         struct bnx2 *bp = netdev_priv(dev);
332         struct drv_ctl_io *io = &info->data.io;
333
334         switch (info->cmd) {
335         case DRV_CTL_IO_WR_CMD:
336                 bnx2_reg_wr_ind(bp, io->offset, io->data);
337                 break;
338         case DRV_CTL_IO_RD_CMD:
339                 io->data = bnx2_reg_rd_ind(bp, io->offset);
340                 break;
341         case DRV_CTL_CTX_WR_CMD:
342                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343                 break;
344         default:
345                 return -EINVAL;
346         }
347         return 0;
348 }
349
/* Fill in the cnic irq/status-block info for the current interrupt
 * mode.  With MSI-X the cnic driver gets the vector at index
 * bp->irq_nvecs (right after the ethernet vectors); with INTx/MSI it
 * shares status block 0 and polls via cnic_tag/cnic_present.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	/* Point the cnic driver at its vector and status block slice. */
	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
376
/* Register a cnic driver with this device.
 *
 * Returns -EINVAL for a NULL ops pointer and -EBUSY if a cnic driver
 * is already registered.  cnic_data is set before the ops pointer is
 * published with rcu_assign_pointer() so RCU readers that see the ops
 * also see valid data.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
399
/* Unregister the cnic driver.  The state is cleared under cnic_lock,
 * then synchronize_rcu() waits for any in-flight RCU readers of
 * cnic_ops before returning.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
414
415 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
416 {
417         struct bnx2 *bp = netdev_priv(dev);
418         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
419
420         cp->drv_owner = THIS_MODULE;
421         cp->chip_id = bp->chip_id;
422         cp->pdev = bp->pdev;
423         cp->io_base = bp->regview;
424         cp->drv_ctl = bnx2_drv_ctl;
425         cp->drv_register_cnic = bnx2_register_cnic;
426         cp->drv_unregister_cnic = bnx2_unregister_cnic;
427
428         return cp;
429 }
430 EXPORT_SYMBOL(bnx2_cnic_probe);
431
432 static void
433 bnx2_cnic_stop(struct bnx2 *bp)
434 {
435         struct cnic_ops *c_ops;
436         struct cnic_ctl_info info;
437
438         mutex_lock(&bp->cnic_lock);
439         c_ops = bp->cnic_ops;
440         if (c_ops) {
441                 info.cmd = CNIC_CTL_STOP_CMD;
442                 c_ops->cnic_ctl(bp->cnic_data, &info);
443         }
444         mutex_unlock(&bp->cnic_lock);
445 }
446
/* Deliver a START event to the registered cnic driver, if any.  When
 * not using MSI-X, resync cnic_tag with the latest status block index
 * first, since the cnic driver shares status block 0 in that mode.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	mutex_lock(&bp->cnic_lock);
	c_ops = bp->cnic_ops;
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	mutex_unlock(&bp->cnic_lock);
}
466
467 #else
468
/* No-op stub when CNIC support is not compiled in. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
473
/* No-op stub when CNIC support is not compiled in. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
478
479 #endif
480
/* Read PHY register @reg over the EMAC MDIO interface.
 *
 * If hardware autopolling is enabled it is temporarily turned off
 * around the access, since it would contend for the MDIO bus.
 * Completion is polled for up to 50 iterations of 10 usecs.
 *
 * Returns 0 with the register value in *@val, or -EBUSY if the
 * transaction did not complete (then *@val is set to 0).
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back (value discarded) to flush the write before
		 * the settling delay.
		 */
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the read: PHY address, register number, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to pick up the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore autopolling if it was disabled above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
537
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 *
 * Same autopolling suppression and completion polling as
 * bnx2_read_phy().  Returns 0 on success or -EBUSY if the transaction
 * did not complete.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		/* Read back (value discarded) to flush the write before
		 * the settling delay.
		 */
		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Kick off the write: PHY address, register, data, WRITE command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Restore autopolling if it was disabled above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
586
/* Mask interrupts on every active vector.  The trailing read (value
 * discarded) flushes the posted register writes.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
600
/* Unmask interrupts on every active vector.  Each vector is written
 * twice -- first acking the last status index with the mask bit still
 * set, then again without it -- and finally the host coalescing block
 * is kicked with COAL_NOW (presumably to force an immediate coalescing
 * pass for any pending events -- see the HC register docs).
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
621
/* Disable interrupts and wait for in-flight handlers to finish.
 * intr_sem is incremented first so the ISR/NAPI paths can see the
 * disable is in progress; it is balanced by bnx2_netif_start().
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
635
636 static void
637 bnx2_napi_disable(struct bnx2 *bp)
638 {
639         int i;
640
641         for (i = 0; i < bp->irq_nvecs; i++)
642                 napi_disable(&bp->bnx2_napi[i].napi);
643 }
644
645 static void
646 bnx2_napi_enable(struct bnx2 *bp)
647 {
648         int i;
649
650         for (i = 0; i < bp->irq_nvecs; i++)
651                 napi_enable(&bp->bnx2_napi[i].napi);
652 }
653
/* Quiesce the interface: optionally stop the cnic driver first, then
 * disable NAPI and the TX queues, and finally mask and synchronize
 * interrupts.  Carrier is forced off so the stack does not declare a
 * TX timeout while we are stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
666
/* Undo bnx2_netif_stop().  intr_sem counts nested disables; only the
 * call that brings it back to zero restarts TX, NAPI, interrupts and
 * (optionally) the cnic driver.  Carrier is restored under phy_lock
 * from the current link state.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
684
685 static void
686 bnx2_free_tx_mem(struct bnx2 *bp)
687 {
688         int i;
689
690         for (i = 0; i < bp->num_tx_rings; i++) {
691                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
692                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
693
694                 if (txr->tx_desc_ring) {
695                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
696                                             txr->tx_desc_ring,
697                                             txr->tx_desc_mapping);
698                         txr->tx_desc_ring = NULL;
699                 }
700                 kfree(txr->tx_buf_ring);
701                 txr->tx_buf_ring = NULL;
702         }
703 }
704
705 static void
706 bnx2_free_rx_mem(struct bnx2 *bp)
707 {
708         int i;
709
710         for (i = 0; i < bp->num_rx_rings; i++) {
711                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
712                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
713                 int j;
714
715                 for (j = 0; j < bp->rx_max_ring; j++) {
716                         if (rxr->rx_desc_ring[j])
717                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
718                                                     rxr->rx_desc_ring[j],
719                                                     rxr->rx_desc_mapping[j]);
720                         rxr->rx_desc_ring[j] = NULL;
721                 }
722                 vfree(rxr->rx_buf_ring);
723                 rxr->rx_buf_ring = NULL;
724
725                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
726                         if (rxr->rx_pg_desc_ring[j])
727                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
728                                                     rxr->rx_pg_desc_ring[j],
729                                                     rxr->rx_pg_desc_mapping[j]);
730                         rxr->rx_pg_desc_ring[j] = NULL;
731                 }
732                 vfree(rxr->rx_pg_ring);
733                 rxr->rx_pg_ring = NULL;
734         }
735 }
736
737 static int
738 bnx2_alloc_tx_mem(struct bnx2 *bp)
739 {
740         int i;
741
742         for (i = 0; i < bp->num_tx_rings; i++) {
743                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
744                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
745
746                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
747                 if (txr->tx_buf_ring == NULL)
748                         return -ENOMEM;
749
750                 txr->tx_desc_ring =
751                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
752                                              &txr->tx_desc_mapping);
753                 if (txr->tx_desc_ring == NULL)
754                         return -ENOMEM;
755         }
756         return 0;
757 }
758
759 static int
760 bnx2_alloc_rx_mem(struct bnx2 *bp)
761 {
762         int i;
763
764         for (i = 0; i < bp->num_rx_rings; i++) {
765                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
766                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
767                 int j;
768
769                 rxr->rx_buf_ring =
770                         vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
771                 if (rxr->rx_buf_ring == NULL)
772                         return -ENOMEM;
773
774                 memset(rxr->rx_buf_ring, 0,
775                        SW_RXBD_RING_SIZE * bp->rx_max_ring);
776
777                 for (j = 0; j < bp->rx_max_ring; j++) {
778                         rxr->rx_desc_ring[j] =
779                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
780                                                      &rxr->rx_desc_mapping[j]);
781                         if (rxr->rx_desc_ring[j] == NULL)
782                                 return -ENOMEM;
783
784                 }
785
786                 if (bp->rx_pg_ring_size) {
787                         rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
788                                                   bp->rx_max_pg_ring);
789                         if (rxr->rx_pg_ring == NULL)
790                                 return -ENOMEM;
791
792                         memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
793                                bp->rx_max_pg_ring);
794                 }
795
796                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
797                         rxr->rx_pg_desc_ring[j] =
798                                 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
799                                                 &rxr->rx_pg_desc_mapping[j]);
800                         if (rxr->rx_pg_desc_ring[j] == NULL)
801                                 return -ENOMEM;
802
803                 }
804         }
805         return 0;
806 }
807
/* Free all device memory: TX/RX rings, the 5709 context pages, and
 * the combined status + statistics block (allocated as one buffer in
 * bnx2_alloc_mem, so freeing status_blk.msi releases stats_blk too).
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
833
/* Allocate all device memory: the combined status + statistics block,
 * the 5709 context pages, and the RX/TX rings.  On any failure
 * everything allocated so far is released via bnx2_free_mem() and
 * -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		/* With MSI-X, reserve one aligned status block slice per
		 * hardware vector.
		 */
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors each get an aligned slice of the
		 * status block area.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 0x2000 bytes of host context memory, split
		 * into page-sized DMA chunks.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
910
911 static void
912 bnx2_report_fw_link(struct bnx2 *bp)
913 {
914         u32 fw_link_status = 0;
915
916         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
917                 return;
918
919         if (bp->link_up) {
920                 u32 bmsr;
921
922                 switch (bp->line_speed) {
923                 case SPEED_10:
924                         if (bp->duplex == DUPLEX_HALF)
925                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
926                         else
927                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
928                         break;
929                 case SPEED_100:
930                         if (bp->duplex == DUPLEX_HALF)
931                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
932                         else
933                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
934                         break;
935                 case SPEED_1000:
936                         if (bp->duplex == DUPLEX_HALF)
937                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
938                         else
939                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
940                         break;
941                 case SPEED_2500:
942                         if (bp->duplex == DUPLEX_HALF)
943                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
944                         else
945                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
946                         break;
947                 }
948
949                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
950
951                 if (bp->autoneg) {
952                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
953
954                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
956
957                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
958                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
959                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
960                         else
961                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
962                 }
963         }
964         else
965                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
966
967         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
968 }
969
970 static char *
971 bnx2_xceiver_str(struct bnx2 *bp)
972 {
973         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
974                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
975                  "Copper"));
976 }
977
/* Log the link state, toggle the carrier, and propagate the state to
 * the firmware.  The "Link is Up" line is deliberately emitted without
 * a trailing newline so the flow-control details can be appended to the
 * same console line via pr_cont().
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
        if (bp->link_up) {
                netif_carrier_on(bp->dev);
                netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
                            bnx2_xceiver_str(bp),
                            bp->line_speed,
                            bp->duplex == DUPLEX_FULL ? "full" : "half");

                if (bp->flow_ctrl) {
                        /* Append ", receive ", ", transmit " or
                         * ", receive & transmit " as appropriate.
                         */
                        if (bp->flow_ctrl & FLOW_CTRL_RX) {
                                pr_cont(", receive ");
                                if (bp->flow_ctrl & FLOW_CTRL_TX)
                                        pr_cont("& transmit ");
                        }
                        else {
                                pr_cont(", transmit ");
                        }
                        pr_cont("flow control ON");
                }
                pr_cont("\n");
        } else {
                netif_carrier_off(bp->dev);
                netdev_err(bp->dev, "NIC %s Link is Down\n",
                           bnx2_xceiver_str(bp));
        }

        /* Keep the firmware's view of the link in sync. */
        bnx2_report_fw_link(bp);
}
1008
1009 static void
1010 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1011 {
1012         u32 local_adv, remote_adv;
1013
1014         bp->flow_ctrl = 0;
1015         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1016                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1017
1018                 if (bp->duplex == DUPLEX_FULL) {
1019                         bp->flow_ctrl = bp->req_flow_ctrl;
1020                 }
1021                 return;
1022         }
1023
1024         if (bp->duplex != DUPLEX_FULL) {
1025                 return;
1026         }
1027
1028         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1029             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1030                 u32 val;
1031
1032                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1033                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1034                         bp->flow_ctrl |= FLOW_CTRL_TX;
1035                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1036                         bp->flow_ctrl |= FLOW_CTRL_RX;
1037                 return;
1038         }
1039
1040         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1041         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1042
1043         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1044                 u32 new_local_adv = 0;
1045                 u32 new_remote_adv = 0;
1046
1047                 if (local_adv & ADVERTISE_1000XPAUSE)
1048                         new_local_adv |= ADVERTISE_PAUSE_CAP;
1049                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1050                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
1051                 if (remote_adv & ADVERTISE_1000XPAUSE)
1052                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
1053                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1054                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1055
1056                 local_adv = new_local_adv;
1057                 remote_adv = new_remote_adv;
1058         }
1059
1060         /* See Table 28B-3 of 802.3ab-1999 spec. */
1061         if (local_adv & ADVERTISE_PAUSE_CAP) {
1062                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1063                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1064                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1065                         }
1066                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1067                                 bp->flow_ctrl = FLOW_CTRL_RX;
1068                         }
1069                 }
1070                 else {
1071                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073                         }
1074                 }
1075         }
1076         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1077                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1078                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1079
1080                         bp->flow_ctrl = FLOW_CTRL_TX;
1081                 }
1082         }
1083 }
1084
1085 static int
1086 bnx2_5709s_linkup(struct bnx2 *bp)
1087 {
1088         u32 val, speed;
1089
1090         bp->link_up = 1;
1091
1092         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1093         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1094         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1095
1096         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1097                 bp->line_speed = bp->req_line_speed;
1098                 bp->duplex = bp->req_duplex;
1099                 return 0;
1100         }
1101         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1102         switch (speed) {
1103                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1104                         bp->line_speed = SPEED_10;
1105                         break;
1106                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1107                         bp->line_speed = SPEED_100;
1108                         break;
1109                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1110                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1111                         bp->line_speed = SPEED_1000;
1112                         break;
1113                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1114                         bp->line_speed = SPEED_2500;
1115                         break;
1116         }
1117         if (val & MII_BNX2_GP_TOP_AN_FD)
1118                 bp->duplex = DUPLEX_FULL;
1119         else
1120                 bp->duplex = DUPLEX_HALF;
1121         return 0;
1122 }
1123
1124 static int
1125 bnx2_5708s_linkup(struct bnx2 *bp)
1126 {
1127         u32 val;
1128
1129         bp->link_up = 1;
1130         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1131         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1132                 case BCM5708S_1000X_STAT1_SPEED_10:
1133                         bp->line_speed = SPEED_10;
1134                         break;
1135                 case BCM5708S_1000X_STAT1_SPEED_100:
1136                         bp->line_speed = SPEED_100;
1137                         break;
1138                 case BCM5708S_1000X_STAT1_SPEED_1G:
1139                         bp->line_speed = SPEED_1000;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1142                         bp->line_speed = SPEED_2500;
1143                         break;
1144         }
1145         if (val & BCM5708S_1000X_STAT1_FD)
1146                 bp->duplex = DUPLEX_FULL;
1147         else
1148                 bp->duplex = DUPLEX_HALF;
1149
1150         return 0;
1151 }
1152
1153 static int
1154 bnx2_5706s_linkup(struct bnx2 *bp)
1155 {
1156         u32 bmcr, local_adv, remote_adv, common;
1157
1158         bp->link_up = 1;
1159         bp->line_speed = SPEED_1000;
1160
1161         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1162         if (bmcr & BMCR_FULLDPLX) {
1163                 bp->duplex = DUPLEX_FULL;
1164         }
1165         else {
1166                 bp->duplex = DUPLEX_HALF;
1167         }
1168
1169         if (!(bmcr & BMCR_ANENABLE)) {
1170                 return 0;
1171         }
1172
1173         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1174         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1175
1176         common = local_adv & remote_adv;
1177         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1178
1179                 if (common & ADVERTISE_1000XFULL) {
1180                         bp->duplex = DUPLEX_FULL;
1181                 }
1182                 else {
1183                         bp->duplex = DUPLEX_HALF;
1184                 }
1185         }
1186
1187         return 0;
1188 }
1189
1190 static int
1191 bnx2_copper_linkup(struct bnx2 *bp)
1192 {
1193         u32 bmcr;
1194
1195         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1196         if (bmcr & BMCR_ANENABLE) {
1197                 u32 local_adv, remote_adv, common;
1198
1199                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1200                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1201
1202                 common = local_adv & (remote_adv >> 2);
1203                 if (common & ADVERTISE_1000FULL) {
1204                         bp->line_speed = SPEED_1000;
1205                         bp->duplex = DUPLEX_FULL;
1206                 }
1207                 else if (common & ADVERTISE_1000HALF) {
1208                         bp->line_speed = SPEED_1000;
1209                         bp->duplex = DUPLEX_HALF;
1210                 }
1211                 else {
1212                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1213                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1214
1215                         common = local_adv & remote_adv;
1216                         if (common & ADVERTISE_100FULL) {
1217                                 bp->line_speed = SPEED_100;
1218                                 bp->duplex = DUPLEX_FULL;
1219                         }
1220                         else if (common & ADVERTISE_100HALF) {
1221                                 bp->line_speed = SPEED_100;
1222                                 bp->duplex = DUPLEX_HALF;
1223                         }
1224                         else if (common & ADVERTISE_10FULL) {
1225                                 bp->line_speed = SPEED_10;
1226                                 bp->duplex = DUPLEX_FULL;
1227                         }
1228                         else if (common & ADVERTISE_10HALF) {
1229                                 bp->line_speed = SPEED_10;
1230                                 bp->duplex = DUPLEX_HALF;
1231                         }
1232                         else {
1233                                 bp->line_speed = 0;
1234                                 bp->link_up = 0;
1235                         }
1236                 }
1237         }
1238         else {
1239                 if (bmcr & BMCR_SPEED100) {
1240                         bp->line_speed = SPEED_100;
1241                 }
1242                 else {
1243                         bp->line_speed = SPEED_10;
1244                 }
1245                 if (bmcr & BMCR_FULLDPLX) {
1246                         bp->duplex = DUPLEX_FULL;
1247                 }
1248                 else {
1249                         bp->duplex = DUPLEX_HALF;
1250                 }
1251         }
1252
1253         return 0;
1254 }
1255
/* Program the L2 context type word for one rx ring (context id @cid),
 * including the pause watermarks on the 5709.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
        u32 val, rx_cid_addr = GET_CID_ADDR(cid);

        val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
        val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
        val |= 0x02 << 8;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                u32 lo_water, hi_water;

                /* Use the default low watermark only when tx flow
                 * control is enabled; otherwise use the disable value.
                 */
                if (bp->flow_ctrl & FLOW_CTRL_TX)
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
                else
                        lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
                /* A low mark at or beyond the ring size is meaningless. */
                if (lo_water >= bp->rx_ring_size)
                        lo_water = 0;

                hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

                /* The high mark must exceed the low mark. */
                if (hi_water <= lo_water)
                        lo_water = 0;

                /* Scale both marks into the units the context expects. */
                hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
                lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

                if (hi_water > 0xf)
                        hi_water = 0xf;         /* clamp to the 4-bit field */
                else if (hi_water == 0)
                        lo_water = 0;
                val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
        }
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1291
1292 static void
1293 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1294 {
1295         int i;
1296         u32 cid;
1297
1298         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1299                 if (i == 1)
1300                         cid = RX_RSS_CID;
1301                 bnx2_init_rx_context(bp, cid);
1302         }
1303 }
1304
/* Program the EMAC to match the link parameters (speed, duplex, pause)
 * currently recorded in @bp.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
        u32 val;

        /* Use an alternate TX lengths setting for 1G half duplex. */
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
        if (bp->link_up && (bp->line_speed == SPEED_1000) &&
                (bp->duplex == DUPLEX_HALF)) {
                REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
        }

        /* Configure the EMAC mode register. */
        val = REG_RD(bp, BNX2_EMAC_MODE);

        val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                BNX2_EMAC_MODE_25G_MODE);

        if (bp->link_up) {
                switch (bp->line_speed) {
                        case SPEED_10:
                                /* The 5706 has no separate 10M MII mode. */
                                if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                                        val |= BNX2_EMAC_MODE_PORT_MII_10M;
                                        break;
                                }
                                /* fall through */
                        case SPEED_100:
                                val |= BNX2_EMAC_MODE_PORT_MII;
                                break;
                        case SPEED_2500:
                                val |= BNX2_EMAC_MODE_25G_MODE;
                                /* fall through */
                        case SPEED_1000:
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                break;
                }
        }
        else {
                val |= BNX2_EMAC_MODE_PORT_GMII;
        }

        /* Set the MAC to operate in the appropriate duplex mode. */
        if (bp->duplex == DUPLEX_HALF)
                val |= BNX2_EMAC_MODE_HALF_DUPLEX;
        REG_WR(bp, BNX2_EMAC_MODE, val);

        /* Enable/disable rx PAUSE. */
        bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_RX)
                bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

        /* Enable/disable tx PAUSE. */
        val = REG_RD(bp, BNX2_EMAC_TX_MODE);
        val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

        if (bp->flow_ctrl & FLOW_CTRL_TX)
                val |= BNX2_EMAC_TX_MODE_FLOW_EN;
        REG_WR(bp, BNX2_EMAC_TX_MODE, val);

        /* Acknowledge the interrupt. */
        REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

        /* 5709 rx context watermarks depend on bp->flow_ctrl, so
         * reprogram them now that it is resolved.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_init_all_rx_contexts(bp);
}
1372
1373 static void
1374 bnx2_enable_bmsr1(struct bnx2 *bp)
1375 {
1376         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1377             (CHIP_NUM(bp) == CHIP_NUM_5709))
1378                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1379                                MII_BNX2_BLK_ADDR_GP_STATUS);
1380 }
1381
1382 static void
1383 bnx2_disable_bmsr1(struct bnx2 *bp)
1384 {
1385         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386             (CHIP_NUM(bp) == CHIP_NUM_5709))
1387                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1389 }
1390
1391 static int
1392 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1393 {
1394         u32 up1;
1395         int ret = 1;
1396
1397         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1398                 return 0;
1399
1400         if (bp->autoneg & AUTONEG_SPEED)
1401                 bp->advertising |= ADVERTISED_2500baseX_Full;
1402
1403         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1404                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1405
1406         bnx2_read_phy(bp, bp->mii_up1, &up1);
1407         if (!(up1 & BCM5708S_UP1_2G5)) {
1408                 up1 |= BCM5708S_UP1_2G5;
1409                 bnx2_write_phy(bp, bp->mii_up1, up1);
1410                 ret = 0;
1411         }
1412
1413         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1414                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1415                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1416
1417         return ret;
1418 }
1419
1420 static int
1421 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1422 {
1423         u32 up1;
1424         int ret = 0;
1425
1426         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1427                 return 0;
1428
1429         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1430                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1431
1432         bnx2_read_phy(bp, bp->mii_up1, &up1);
1433         if (up1 & BCM5708S_UP1_2G5) {
1434                 up1 &= ~BCM5708S_UP1_2G5;
1435                 bnx2_write_phy(bp, bp->mii_up1, up1);
1436                 ret = 1;
1437         }
1438
1439         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1440                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1441                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1442
1443         return ret;
1444 }
1445
1446 static void
1447 bnx2_enable_forced_2g5(struct bnx2 *bp)
1448 {
1449         u32 uninitialized_var(bmcr);
1450         int err;
1451
1452         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1453                 return;
1454
1455         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1456                 u32 val;
1457
1458                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1459                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1460                 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1461                         val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1462                         val |= MII_BNX2_SD_MISC1_FORCE |
1463                                 MII_BNX2_SD_MISC1_FORCE_2_5G;
1464                         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1465                 }
1466
1467                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1468                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1469                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1470
1471         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1472                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1473                 if (!err)
1474                         bmcr |= BCM5708S_BMCR_FORCE_2500;
1475         } else {
1476                 return;
1477         }
1478
1479         if (err)
1480                 return;
1481
1482         if (bp->autoneg & AUTONEG_SPEED) {
1483                 bmcr &= ~BMCR_ANENABLE;
1484                 if (bp->req_duplex == DUPLEX_FULL)
1485                         bmcr |= BMCR_FULLDPLX;
1486         }
1487         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1488 }
1489
1490 static void
1491 bnx2_disable_forced_2g5(struct bnx2 *bp)
1492 {
1493         u32 uninitialized_var(bmcr);
1494         int err;
1495
1496         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1497                 return;
1498
1499         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1500                 u32 val;
1501
1502                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1503                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1504                 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1505                         val &= ~MII_BNX2_SD_MISC1_FORCE;
1506                         bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1507                 }
1508
1509                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1510                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1511                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1512
1513         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1514                 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1515                 if (!err)
1516                         bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1517         } else {
1518                 return;
1519         }
1520
1521         if (err)
1522                 return;
1523
1524         if (bp->autoneg & AUTONEG_SPEED)
1525                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1526         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1527 }
1528
1529 static void
1530 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1531 {
1532         u32 val;
1533
1534         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1535         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1536         if (start)
1537                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1538         else
1539                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1540 }
1541
/* Poll the PHY link state, update bp->link_up/line_speed/duplex and the
 * resolved flow control, report any change, and reprogram the MAC.
 * Returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
        u32 bmsr;
        u8 link_up;

        /* In loopback the link is up by definition. */
        if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
                bp->link_up = 1;
                return 0;
        }

        /* A remote PHY is managed by the firmware, not the driver. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return 0;

        link_up = bp->link_up;

        /* Read BMSR twice so the latched link-status bit reflects the
         * current state.
         */
        bnx2_enable_bmsr1(bp);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);

        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (CHIP_NUM(bp) == CHIP_NUM_5706)) {
                u32 val, an_dbg;

                if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
                        bnx2_5706s_force_link_dn(bp, 0);
                        bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
                }
                val = REG_RD(bp, BNX2_EMAC_STATUS);

                /* AN debug shadow register is read twice as well. */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

                /* On 5706 SerDes, derive the link bit from the EMAC
                 * link status combined with the AN sync status instead
                 * of trusting BMSR.
                 */
                if ((val & BNX2_EMAC_STATUS_LINK) &&
                    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
                        bmsr |= BMSR_LSTATUS;
                else
                        bmsr &= ~BMSR_LSTATUS;
        }

        if (bmsr & BMSR_LSTATUS) {
                bp->link_up = 1;

                /* Resolve speed/duplex with the chip-specific helper. */
                if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                                bnx2_5706s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                                bnx2_5708s_linkup(bp);
                        else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                                bnx2_5709s_linkup(bp);
                }
                else {
                        bnx2_copper_linkup(bp);
                }
                bnx2_resolve_flow_ctrl(bp);
        }
        else {
                if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
                    (bp->autoneg & AUTONEG_SPEED))
                        bnx2_disable_forced_2g5(bp);

                /* If the lost link had been parallel-detected, go back
                 * to autonegotiation.
                 */
                if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
                bp->link_up = 0;
        }

        /* Only log/report when the state actually changed. */
        if (bp->link_up != link_up) {
                bnx2_report_link(bp);
        }

        bnx2_set_mac_link(bp);

        return 0;
}
1625
1626 static int
1627 bnx2_reset_phy(struct bnx2 *bp)
1628 {
1629         int i;
1630         u32 reg;
1631
1632         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1633
1634 #define PHY_RESET_MAX_WAIT 100
1635         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1636                 udelay(10);
1637
1638                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1639                 if (!(reg & BMCR_RESET)) {
1640                         udelay(20);
1641                         break;
1642                 }
1643         }
1644         if (i == PHY_RESET_MAX_WAIT) {
1645                 return -EBUSY;
1646         }
1647         return 0;
1648 }
1649
1650 static u32
1651 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1652 {
1653         u32 adv = 0;
1654
1655         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1656                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1657
1658                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659                         adv = ADVERTISE_1000XPAUSE;
1660                 }
1661                 else {
1662                         adv = ADVERTISE_PAUSE_CAP;
1663                 }
1664         }
1665         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1666                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667                         adv = ADVERTISE_1000XPSE_ASYM;
1668                 }
1669                 else {
1670                         adv = ADVERTISE_PAUSE_ASYM;
1671                 }
1672         }
1673         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1674                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1675                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1676                 }
1677                 else {
1678                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1679                 }
1680         }
1681         return adv;
1682 }
1683
1684 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1685
1686 static int
1687 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1688 __releases(&bp->phy_lock)
1689 __acquires(&bp->phy_lock)
1690 {
1691         u32 speed_arg = 0, pause_adv;
1692
1693         pause_adv = bnx2_phy_get_pause_adv(bp);
1694
1695         if (bp->autoneg & AUTONEG_SPEED) {
1696                 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1697                 if (bp->advertising & ADVERTISED_10baseT_Half)
1698                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1699                 if (bp->advertising & ADVERTISED_10baseT_Full)
1700                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1701                 if (bp->advertising & ADVERTISED_100baseT_Half)
1702                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1703                 if (bp->advertising & ADVERTISED_100baseT_Full)
1704                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1705                 if (bp->advertising & ADVERTISED_1000baseT_Full)
1706                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1707                 if (bp->advertising & ADVERTISED_2500baseX_Full)
1708                         speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1709         } else {
1710                 if (bp->req_line_speed == SPEED_2500)
1711                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1712                 else if (bp->req_line_speed == SPEED_1000)
1713                         speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1714                 else if (bp->req_line_speed == SPEED_100) {
1715                         if (bp->req_duplex == DUPLEX_FULL)
1716                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1717                         else
1718                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1719                 } else if (bp->req_line_speed == SPEED_10) {
1720                         if (bp->req_duplex == DUPLEX_FULL)
1721                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1722                         else
1723                                 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1724                 }
1725         }
1726
1727         if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1728                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1729         if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1730                 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1731
1732         if (port == PORT_TP)
1733                 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1734                              BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1735
1736         bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1737
1738         spin_unlock_bh(&bp->phy_lock);
1739         bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1740         spin_lock_bh(&bp->phy_lock);
1741
1742         return 0;
1743 }
1744
/* Configure a SerDes (fiber) PHY for either forced speed/duplex or
 * autonegotiation, according to bp->autoneg / bp->req_line_speed /
 * bp->advertising.  May drop bp->phy_lock briefly (msleep) when bouncing
 * the link.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed PHYs are configured through the mailbox instead. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching the 2.5G mode on/off may require a link bounce
		 * so the partner renegotiates.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced 2.5G speed bit. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear bit 13 (BMCR_SPEED100) — presumably it
				 * combines with BMCR_SPEED1000 to select 2.5G
				 * on the 5709; confirm against chip docs.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				/* Withdraw all 1000X advertisement and restart
				 * autoneg so the partner sees the link drop.
				 */
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			/* Nothing changed in the PHY; just re-sync the MAC. */
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep cannot run under the spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1861
/* All fiber speeds advertisable via ethtool; expands using a local `bp`
 * at the call site to include 2.5G only on 2.5G-capable PHYs.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds advertisable via ethtool (ADVERTISED_* bits). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement-register (ADVERTISE_*) masks for 10/100 and 1000. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1876
1877 static void
1878 bnx2_set_default_remote_link(struct bnx2 *bp)
1879 {
1880         u32 link;
1881
1882         if (bp->phy_port == PORT_TP)
1883                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1884         else
1885                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1886
1887         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1888                 bp->req_line_speed = 0;
1889                 bp->autoneg |= AUTONEG_SPEED;
1890                 bp->advertising = ADVERTISED_Autoneg;
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1892                         bp->advertising |= ADVERTISED_10baseT_Half;
1893                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1894                         bp->advertising |= ADVERTISED_10baseT_Full;
1895                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1896                         bp->advertising |= ADVERTISED_100baseT_Half;
1897                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1898                         bp->advertising |= ADVERTISED_100baseT_Full;
1899                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1900                         bp->advertising |= ADVERTISED_1000baseT_Full;
1901                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1902                         bp->advertising |= ADVERTISED_2500baseX_Full;
1903         } else {
1904                 bp->autoneg = 0;
1905                 bp->advertising = 0;
1906                 bp->req_duplex = DUPLEX_FULL;
1907                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1908                         bp->req_line_speed = SPEED_10;
1909                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1910                                 bp->req_duplex = DUPLEX_HALF;
1911                 }
1912                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1913                         bp->req_line_speed = SPEED_100;
1914                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1915                                 bp->req_duplex = DUPLEX_HALF;
1916                 }
1917                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1918                         bp->req_line_speed = SPEED_1000;
1919                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1920                         bp->req_line_speed = SPEED_2500;
1921         }
1922 }
1923
1924 static void
1925 bnx2_set_default_link(struct bnx2 *bp)
1926 {
1927         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1928                 bnx2_set_default_remote_link(bp);
1929                 return;
1930         }
1931
1932         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1933         bp->req_line_speed = 0;
1934         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1935                 u32 reg;
1936
1937                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1938
1939                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1940                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1941                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1942                         bp->autoneg = 0;
1943                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1944                         bp->req_duplex = DUPLEX_FULL;
1945                 }
1946         } else
1947                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1948 }
1949
1950 static void
1951 bnx2_send_heart_beat(struct bnx2 *bp)
1952 {
1953         u32 msg;
1954         u32 addr;
1955
1956         spin_lock(&bp->indirect_lock);
1957         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1958         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1959         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1960         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1961         spin_unlock(&bp->indirect_lock);
1962 }
1963
/* Process a link-status event from the management firmware for a
 * remotely-managed PHY: decode BNX2_LINK_STATUS from shared memory into
 * bp->link_up / line_speed / duplex / flow_ctrl / phy_port, then report
 * the link change and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* remember old state to detect change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case overrides the duplex and then falls through
		 * to the matching case that sets the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: forced value unless both speed and flow
		 * control were autonegotiated, in which case take the
		 * negotiated result from the firmware message.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* A media change (fiber <-> copper) resets the defaults. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2040
2041 static int
2042 bnx2_set_remote_link(struct bnx2 *bp)
2043 {
2044         u32 evt_code;
2045
2046         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2047         switch (evt_code) {
2048                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2049                         bnx2_remote_phy_event(bp);
2050                         break;
2051                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2052                 default:
2053                         bnx2_send_heart_beat(bp);
2054                         break;
2055         }
2056         return 0;
2057 }
2058
/* Configure a copper PHY for autonegotiation or forced speed/duplex based
 * on bp->autoneg / bp->advertising / bp->req_line_speed.  May drop
 * bp->phy_lock briefly (msleep) while forcing the link down.
 * Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg path: rebuild the 10/100 and 1000 advertisement
		 * registers and restart autoneg only if something changed.
		 */
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state (MII convention).
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep cannot run under the spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2157
2158 static int
2159 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2160 __releases(&bp->phy_lock)
2161 __acquires(&bp->phy_lock)
2162 {
2163         if (bp->loopback == MAC_LOOPBACK)
2164                 return 0;
2165
2166         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2167                 return (bnx2_setup_serdes_phy(bp, port));
2168         }
2169         else {
2170                 return (bnx2_setup_copper_phy(bp));
2171         }
2172 }
2173
/* Initialize the 5709 SerDes PHY.  The 5709 places the IEEE-standard MII
 * registers at an offset of 0x10 and uses paged register blocks selected
 * through MII_BNX2_BLK_ADDR.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* The standard MII registers live at base + 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Force fiber mode instead of media auto-detection. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable or disable 2.5G advertisement per PHY capability. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton-2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	/* Enable clause-73 BAM station manager / next-page handling. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the IEEE combo block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2223
/* Initialize the 5708 SerDes PHY: fiber mode, PLL early-lock detect,
 * optional 2.5G advertisement, plus rev- and board-specific TX amplitude
 * tweaks.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the PHY supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply an NVRAM-supplied TX control value on backplane boards. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2281
/* Initialize the 5706 SerDes PHY, including jumbo-frame related register
 * tweaks keyed off the device MTU.  Always returns 0.
 *
 * NOTE(review): registers 0x18 and 0x1c appear to be Broadcom auxiliary
 * control / shadow registers — the magic values are taken as-is from the
 * vendor; confirm against the PHY datasheet before changing.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear the extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2319
/* Initialize a copper PHY: optional CRC workaround and early-DAC disable,
 * MTU-dependent extended packet length setting, and ethernet@wirespeed
 * enable.  Always returns 0.
 *
 * NOTE(review): registers 0x10/0x15/0x17/0x18/0x1c look like Broadcom
 * auxiliary/shadow registers; the magic sequences are vendor-supplied —
 * confirm against the PHY datasheet before changing.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor workaround sequence, applied when the CRC-fix flag is set. */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2371
2372
2373 static int
2374 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2375 __releases(&bp->phy_lock)
2376 __acquires(&bp->phy_lock)
2377 {
2378         u32 val;
2379         int rc = 0;
2380
2381         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2382         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2383
2384         bp->mii_bmcr = MII_BMCR;
2385         bp->mii_bmsr = MII_BMSR;
2386         bp->mii_bmsr1 = MII_BMSR;
2387         bp->mii_adv = MII_ADVERTISE;
2388         bp->mii_lpa = MII_LPA;
2389
2390         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2391
2392         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2393                 goto setup_phy;
2394
2395         bnx2_read_phy(bp, MII_PHYSID1, &val);
2396         bp->phy_id = val << 16;
2397         bnx2_read_phy(bp, MII_PHYSID2, &val);
2398         bp->phy_id |= val & 0xffff;
2399
2400         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2401                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2402                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2403                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2404                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2405                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2406                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2407         }
2408         else {
2409                 rc = bnx2_init_copper_phy(bp, reset_phy);
2410         }
2411
2412 setup_phy:
2413         if (!rc)
2414                 rc = bnx2_setup_phy(bp, bp->phy_port);
2415
2416         return rc;
2417 }
2418
2419 static int
2420 bnx2_set_mac_loopback(struct bnx2 *bp)
2421 {
2422         u32 mac_mode;
2423
2424         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2425         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2426         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2427         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2428         bp->link_up = 1;
2429         return 0;
2430 }
2431
2432 static int bnx2_test_link(struct bnx2 *);
2433
2434 static int
2435 bnx2_set_phy_loopback(struct bnx2 *bp)
2436 {
2437         u32 mac_mode;
2438         int rc, i;
2439
2440         spin_lock_bh(&bp->phy_lock);
2441         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2442                             BMCR_SPEED1000);
2443         spin_unlock_bh(&bp->phy_lock);
2444         if (rc)
2445                 return rc;
2446
2447         for (i = 0; i < 10; i++) {
2448                 if (bnx2_test_link(bp) == 0)
2449                         break;
2450                 msleep(100);
2451         }
2452
2453         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2454         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2455                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2456                       BNX2_EMAC_MODE_25G_MODE);
2457
2458         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2459         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2460         bp->link_up = 1;
2461         return 0;
2462 }
2463
/* Send a command to the bootcode through the DRV_MB mailbox and, when
 * `ack` is set, poll the FW_MB for the matching sequence number.
 * Sleeps, so it must be called without spinlocks held.
 * Returns 0 on success, -EBUSY on handshake timeout (unless the command
 * was a WAIT0 message), or -EIO if the firmware reported failure.
 * `silent` suppresses the timeout log message.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with the next driver sequence number so the
	 * firmware's ack can be matched to this request.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages don't require a definitive answer. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2508
/* Initialize the 5709's context memory.
 *
 * Kicks off the hardware context-memory init, waits for it to finish,
 * then loads the host page table with the DMA address of every
 * pre-allocated context block, polling each page-table write request
 * for completion.  Returns 0 on success, -EBUSY on a hardware poll
 * timeout, or -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        /* Page size is encoded as (log2(page size) - 8) in bits 16+. */
        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);
        /* Poll (up to ~20us) for the MEM_INIT bit to self-clear. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Program the low/high halves of the block's DMA address,
                 * then request the page-table write for entry i. */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                /* Poll for the WRITE_REQ bit to self-clear. */
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2556
2557 static void
2558 bnx2_init_context(struct bnx2 *bp)
2559 {
2560         u32 vcid;
2561
2562         vcid = 96;
2563         while (vcid) {
2564                 u32 vcid_addr, pcid_addr, offset;
2565                 int i;
2566
2567                 vcid--;
2568
2569                 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2570                         u32 new_vcid;
2571
2572                         vcid_addr = GET_PCID_ADDR(vcid);
2573                         if (vcid & 0x8) {
2574                                 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2575                         }
2576                         else {
2577                                 new_vcid = vcid;
2578                         }
2579                         pcid_addr = GET_PCID_ADDR(new_vcid);
2580                 }
2581                 else {
2582                         vcid_addr = GET_CID_ADDR(vcid);
2583                         pcid_addr = vcid_addr;
2584                 }
2585
2586                 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2587                         vcid_addr += (i << PHY_CTX_SHIFT);
2588                         pcid_addr += (i << PHY_CTX_SHIFT);
2589
2590                         REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2591                         REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2592
2593                         /* Zero out the context. */
2594                         for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2595                                 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2596                 }
2597         }
2598 }
2599
2600 static int
2601 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2602 {
2603         u16 *good_mbuf;
2604         u32 good_mbuf_cnt;
2605         u32 val;
2606
2607         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2608         if (good_mbuf == NULL) {
2609                 pr_err("Failed to allocate memory in %s\n", __func__);
2610                 return -ENOMEM;
2611         }
2612
2613         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2614                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2615
2616         good_mbuf_cnt = 0;
2617
2618         /* Allocate a bunch of mbufs and save the good ones in an array. */
2619         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2620         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2621                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2622                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2623
2624                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2625
2626                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2627
2628                 /* The addresses with Bit 9 set are bad memory blocks. */
2629                 if (!(val & (1 << 9))) {
2630                         good_mbuf[good_mbuf_cnt] = (u16) val;
2631                         good_mbuf_cnt++;
2632                 }
2633
2634                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2635         }
2636
2637         /* Free the good ones back to the mbuf pool thus discarding
2638          * all the bad ones. */
2639         while (good_mbuf_cnt) {
2640                 good_mbuf_cnt--;
2641
2642                 val = good_mbuf[good_mbuf_cnt];
2643                 val = (val << 9) | val | 1;
2644
2645                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2646         }
2647         kfree(good_mbuf);
2648         return 0;
2649 }
2650
2651 static void
2652 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2653 {
2654         u32 val;
2655
2656         val = (mac_addr[0] << 8) | mac_addr[1];
2657
2658         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2659
2660         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2661                 (mac_addr[4] << 8) | mac_addr[5];
2662
2663         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2664 }
2665
2666 static inline int
2667 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2668 {
2669         dma_addr_t mapping;
2670         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2671         struct rx_bd *rxbd =
2672                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2673         struct page *page = alloc_page(GFP_ATOMIC);
2674
2675         if (!page)
2676                 return -ENOMEM;
2677         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2678                                PCI_DMA_FROMDEVICE);
2679         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2680                 __free_page(page);
2681                 return -EIO;
2682         }
2683
2684         rx_pg->page = page;
2685         dma_unmap_addr_set(rx_pg, mapping, mapping);
2686         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2687         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2688         return 0;
2689 }
2690
/* Unmap and free the page held by page-ring slot @index, if any. */
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
        struct page *page = rx_pg->page;

        if (!page)
                return;

        pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
                       PCI_DMA_FROMDEVICE);

        __free_page(page);
        /* Clear the slot so the page can't be freed twice. */
        rx_pg->page = NULL;
}
2706
/* Allocate, align and DMA-map a fresh skb for RX ring slot @index.
 *
 * The buffer's DMA address is written into the matching rx_bd and
 * rx_prod_bseq is advanced by the buffer size.  Returns 0 on success,
 * -ENOMEM if the skb allocation fails, or -EIO if the DMA mapping
 * fails.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align skb->data to a BNX2_RX_ALIGN boundary. */
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        /* The chip DMAs the l2_fhdr to the start of the buffer. */
        rx_buf->skb = skb;
        rx_buf->desc = (struct l2_fhdr *) skb->data;
        dma_unmap_addr_set(rx_buf, mapping, mapping);

        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
2742
2743 static int
2744 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2745 {
2746         struct status_block *sblk = bnapi->status_blk.msi;
2747         u32 new_link_state, old_link_state;
2748         int is_set = 1;
2749
2750         new_link_state = sblk->status_attn_bits & event;
2751         old_link_state = sblk->status_attn_bits_ack & event;
2752         if (new_link_state != old_link_state) {
2753                 if (new_link_state)
2754                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2755                 else
2756                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2757         } else
2758                 is_set = 0;
2759
2760         return is_set;
2761 }
2762
/* Handle PHY attention events signalled in the status block.
 *
 * Called from interrupt/NAPI context, hence spin_lock (not _bh).
 * LINK_STATE attentions update the local link state; TIMER_ABORT
 * attentions trigger remote link processing via bnx2_set_remote_link().
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2776
/* Read the hardware TX consumer index from the status block.
 *
 * The barriers prevent the compiler from caching the DMA-updated
 * status block field.  The hardware skips the last descriptor of each
 * ring page (the next-page pointer), so step past it when the index
 * lands there.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_tx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
                cons++;
        return cons;
}
2790
/* Reclaim completed TX descriptors for this napi's TX ring.
 *
 * Walks from the driver's consumer index to the hardware consumer
 * index, unmapping and freeing each completed skb, up to @budget
 * packets.  Wakes the corresponding netdev TX queue if it was stopped
 * and enough descriptors have been freed.  Returns the number of
 * packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        struct netdev_queue *txq;

        /* One TX queue per napi vector. */
        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                /* partial BD completions possible with TSO packets */
                if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons + tx_buf->nr_frags + 1;
                        last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        /* Account for the skipped next-page descriptor. */
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* If the packet's last bd hasn't completed, stop. */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                        skb_headlen(skb), PCI_DMA_TODEVICE);

                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;

                /* Unmap each fragment's descriptor. */
                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);

                        pci_unmap_page(bp->pdev,
                                dma_unmap_addr(
                                        &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
                                        mapping),
                                skb_shinfo(skb)->frags[i].size,
                                PCI_DMA_TODEVICE);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Refresh hw_cons to pick up newly-completed work. */
                if (hw_cons == sw_cons)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Re-check under the tx lock to avoid racing with the xmit path. */
        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
2881
/* Recycle @count page-ring entries from the consumer back to the
 * producer so the hardware can reuse them.  If @skb is non-NULL, its
 * last page frag could not be replaced: strip that page off the skb,
 * return it to the ring, and free the skb.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        int i;
        u16 hw_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        cons_rx_pg = &rxr->rx_pg_ring[cons];

        /* The caller was unable to allocate a new page to replace the
         * last one in the frags array, so we need to recycle that page
         * and then free the skb.
         */
        if (skb) {
                struct page *page;
                struct skb_shared_info *shinfo;

                shinfo = skb_shinfo(skb);
                shinfo->nr_frags--;
                page = shinfo->frags[shinfo->nr_frags].page;
                shinfo->frags[shinfo->nr_frags].page = NULL;

                cons_rx_pg->page = page;
                dev_kfree_skb(skb);
        }

        hw_prod = rxr->rx_pg_prod;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                /* Move the page, its mapping and its bd address from the
                 * consumer slot to the producer slot. */
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        dma_unmap_addr_set(prod_rx_pg, mapping,
                                dma_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2937
/* Give an RX buffer back to the hardware by moving it from consumer
 * slot @cons to producer slot @prod (used when the packet was copied,
 * dropped, or a replacement buffer couldn't be allocated).  Re-syncs
 * the header area for the device and advances rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Only the header area was synced for the CPU; hand it back. */
        pci_dma_sync_single_for_device(bp->pdev,
                dma_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;
        prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

        /* Same slot: nothing else to move. */
        if (cons == prod)
                return;

        dma_unmap_addr_set(prod_rx_buf, mapping,
                        dma_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2968
/* Finish assembling a received packet into @skb.
 *
 * First re-fills producer slot with a fresh skb; on failure the old
 * buffer (and any page-ring pages for split/jumbo packets) is recycled
 * and an error is returned.  @len is the packet length excluding the
 * 4-byte FCS; the page-frag accounting temporarily adds the FCS back
 * and trims it from the last fragment.  @hdr_len non-zero means the
 * packet is split: the header is in the linear buffer and the rest in
 * page-ring fragments.  @ring_idx packs cons (high 16 bits) and prod
 * (low 16 bits).  Returns 0 on success or a negative errno.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        err = bnx2_alloc_rx_skb(bp, rxr, prod);
        if (unlikely(err)) {
                /* Couldn't refill: recycle the old buffer and pages. */
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                         PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Whole packet fits in the linear buffer. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                /* frag_size includes the FCS; it is trimmed below. */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        dma_addr_t mapping_old;

                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        /* This frag holds only FCS bytes: trim what already
                         * went onto the skb and recycle the leftover pages. */
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
                        mapping_old = dma_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        pci_unmap_page(bp->pdev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
3067
/* Read the hardware RX consumer index from the status block.
 *
 * The barriers prevent the compiler from caching the DMA-updated
 * status block field.  The hardware skips the last descriptor of each
 * ring page (the next-page pointer), so step past it when the index
 * lands there.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_rx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
}
3081
/* Process received packets on this napi's RX ring, up to @budget.
 *
 * For each completed descriptor: error frames are dropped and their
 * buffers recycled; small packets (<= rx_copy_thresh) are copied into
 * a fresh skb and the original buffer recycled; larger packets are
 * completed via bnx2_rx_skb().  VLAN tags, checksum-offload results
 * and the RX hash are then applied before handing the skb up through
 * GRO.  Producer indices and the byte sequence are written back to the
 * chip at the end.  Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
        struct l2_fhdr *rx_hdr;
        int rx_pkt = 0, pg_ring_used = 0;

        hw_cons = bnx2_get_hw_rx_cons(bnapi);
        sw_cons = rxr->rx_cons;
        sw_prod = rxr->rx_prod;

        /* Memory barrier necessary as speculative reads of the rx
         * buffer can be ahead of the index in the status block
         */
        rmb();
        while (sw_cons != hw_cons) {
                unsigned int len, hdr_len;
                u32 status;
                struct sw_bd *rx_buf, *next_rx_buf;
                struct sk_buff *skb;
                dma_addr_t dma_addr;
                u16 vtag = 0;
                int hw_vlan __maybe_unused = 0;

                sw_ring_cons = RX_RING_IDX(sw_cons);
                sw_ring_prod = RX_RING_IDX(sw_prod);

                rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
                skb = rx_buf->skb;
                prefetchw(skb);

                /* Warm up the next descriptor's l2_fhdr. */
                next_rx_buf =
                        &rxr->rx_buf_ring[RX_RING_IDX(NEXT_RX_BD(sw_cons))];
                prefetch(next_rx_buf->desc);

                rx_buf->skb = NULL;

                dma_addr = dma_unmap_addr(rx_buf, mapping);

                /* Only the header area is synced here; the rest is
                 * handled when the buffer is unmapped or copied. */
                pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
                        BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
                        PCI_DMA_FROMDEVICE);

                rx_hdr = rx_buf->desc;
                len = rx_hdr->l2_fhdr_pkt_len;
                status = rx_hdr->l2_fhdr_status;

                /* Split or jumbo packets continue in the page ring. */
                hdr_len = 0;
                if (status & L2_FHDR_STATUS_SPLIT) {
                        hdr_len = rx_hdr->l2_fhdr_ip_xsum;
                        pg_ring_used = 1;
                } else if (len > bp->rx_jumbo_thresh) {
                        hdr_len = bp->rx_jumbo_thresh;
                        pg_ring_used = 1;
                }

                /* Drop error frames and recycle their buffers. */
                if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
                                       L2_FHDR_ERRORS_PHY_DECODE |
                                       L2_FHDR_ERRORS_ALIGNMENT |
                                       L2_FHDR_ERRORS_TOO_SHORT |
                                       L2_FHDR_ERRORS_GIANT_FRAME))) {

                        bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                          sw_ring_prod);
                        if (pg_ring_used) {
                                int pages;

                                pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                        }
                        goto next_rx;
                }

                /* Strip the 4-byte FCS. */
                len -= 4;

                if (len <= bp->rx_copy_thresh) {
                        struct sk_buff *new_skb;

                        new_skb = netdev_alloc_skb(bp->dev, len + 6);
                        if (new_skb == NULL) {
                                bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
                                                  sw_ring_prod);
                                goto next_rx;
                        }

                        /* aligned copy */
                        skb_copy_from_linear_data_offset(skb,
                                                         BNX2_RX_OFFSET - 6,
                                      new_skb->data, len + 6);
                        skb_reserve(new_skb, 6);
                        skb_put(new_skb, len);

                        bnx2_reuse_rx_skb(bp, rxr, skb,
                                sw_ring_cons, sw_ring_prod);

                        skb = new_skb;
                } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
                           dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
                        goto next_rx;

                /* With no vlan group, re-insert the stripped tag inline. */
                if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
                    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
                        vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
                        if (bp->vlgrp)
                                hw_vlan = 1;
                        else
#endif
                        {
                                struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
                                        __skb_push(skb, 4);

                                memmove(ve, skb->data + 4, ETH_ALEN * 2);
                                ve->h_vlan_proto = htons(ETH_P_8021Q);
                                ve->h_vlan_TCI = htons(vtag);
                                len += 4;
                        }
                }

                skb->protocol = eth_type_trans(skb, bp->dev);

                /* Drop oversized frames unless they are VLAN tagged. */
                if ((len > (bp->dev->mtu + ETH_HLEN)) &&
                        (ntohs(skb->protocol) != 0x8100)) {

                        dev_kfree_skb(skb);
                        goto next_rx;

                }

                skb->ip_summed = CHECKSUM_NONE;
                if (bp->rx_csum &&
                        (status & (L2_FHDR_STATUS_TCP_SEGMENT |
                        L2_FHDR_STATUS_UDP_DATAGRAM))) {

                        if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
                                              L2_FHDR_ERRORS_UDP_XSUM)) == 0))
                                skb->ip_summed = CHECKSUM_UNNECESSARY;
                }
                if ((bp->dev->features & NETIF_F_RXHASH) &&
                    ((status & L2_FHDR_STATUS_USE_RXHASH) ==
                     L2_FHDR_STATUS_USE_RXHASH))
                        skb->rxhash = rx_hdr->l2_fhdr_hash;

                skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
                if (hw_vlan)
                        vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
                else
#endif
                        napi_gro_receive(&bnapi->napi, skb);

                rx_pkt++;

next_rx:
                sw_cons = NEXT_RX_BD(sw_cons);
                sw_prod = NEXT_RX_BD(sw_prod);

                if ((rx_pkt == budget))
                        break;

                /* Refresh hw_cons to see if there is new work */
                if (sw_cons == hw_cons) {
                        hw_cons = bnx2_get_hw_rx_cons(bnapi);
                        rmb();
                }
        }
        rxr->rx_cons = sw_cons;
        rxr->rx_prod = sw_prod;

        /* Tell the chip about the new producer indices. */
        if (pg_ring_used)
                REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

        REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

        mmiowb();

        return rx_pkt;

}
3266
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);
        /* Mask further interrupts until NAPI polling completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3289
/* One-shot MSI ISR - the hardware auto-masks the interrupt, so no
 * explicit mask write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3306
/* Legacy INTx ISR (possibly shared).  Returns IRQ_NONE if the
 * interrupt was not raised by this device, IRQ_HANDLED otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        /* Mask further interrupts until NAPI polling completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Record the status index before scheduling so new events
         * arriving afterwards still trigger another interrupt. */
        if (napi_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
3345
3346 static inline int
3347 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3348 {
3349         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3350         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3351
3352         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3353             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3354                 return 1;
3355         return 0;
3356 }
3357
3358 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3359                                  STATUS_ATTN_BITS_TIMER_ABORT)
3360
3361 static inline int
3362 bnx2_has_work(struct bnx2_napi *bnapi)
3363 {
3364         struct status_block *sblk = bnapi->status_blk.msi;
3365
3366         if (bnx2_has_fast_work(bnapi))
3367                 return 1;
3368
3369 #ifdef BCM_CNIC
3370         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3371                 return 1;
3372 #endif
3373
3374         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3375             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3376                 return 1;
3377
3378         return 0;
3379 }
3380
/* Workaround for missed MSI interrupts: if work is pending but the
 * status index has not advanced since the previous idle check, the
 * MSI was presumably lost.  Pulse the MSI enable bit and invoke the
 * MSI handler by hand to restart processing.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do if MSI is not currently enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI off and back on, then service
			 * the interrupt directly.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember the index we saw for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3402
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC offload driver, if
 * any.  bp->cnic_ops is RCU-protected because the CNIC module can
 * unregister at any time; cnic_tag records the status index the
 * handler last consumed (checked in bnx2_has_work()).
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3419
/* Service link-state / timer-abort attention events.  An event is
 * pending when the attention bits differ from their acked copies in
 * the status block.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back flushes the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3439
3440 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3441                           int work_done, int budget)
3442 {
3443         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3444         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3445
3446         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3447                 bnx2_tx_int(bp, bnapi, 0);
3448
3449         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3450                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3451
3452         return work_done;
3453 }
3454
/* NAPI poll handler for MSI-X vectors.  Only fast-path rx/tx work
 * is handled here; link and CNIC events are handled by bnx2_poll()
 * (this handler never calls bnx2_poll_link/bnx2_poll_cnic).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: ack the last seen status index, which
			 * also re-enables this vector's interrupt.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3481
/* NAPI poll handler for the single-vector (INTx/MSI) case: services
 * link attention, fast-path rx/tx, and (if built in) CNIC work.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack write
				 * re-enables the interrupt.
				 */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the interrupt still masked
			 * first, then write again without the mask bit
			 * to re-enable it.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3530
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC receive mode (promiscuous / VLAN-tag keep) and
 * the RPM sort-user filters (broadcast, multicast hash, unicast
 * match entries) from the net_device flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promiscuous and VLAN-keep
	 * cleared; both are re-added below as needed.
	 */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags only while no vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash filter bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address (low byte of its CRC) into one
		 * bit of the 8 x 32-bit multicast filter.
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many secondary unicast addresses for the exact-match
	 * filters: fall back to promiscuous mode.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the exact-match filter list. */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC rx mode register when it changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort mode: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3623
3624 static int __devinit
3625 check_fw_section(const struct firmware *fw,
3626                  const struct bnx2_fw_file_section *section,
3627                  u32 alignment, bool non_empty)
3628 {
3629         u32 offset = be32_to_cpu(section->offset);
3630         u32 len = be32_to_cpu(section->len);
3631
3632         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3633                 return -EINVAL;
3634         if ((non_empty && len == 0) || len > fw->size - offset ||
3635             len & (alignment - 1))
3636                 return -EINVAL;
3637         return 0;
3638 }
3639
3640 static int __devinit
3641 check_mips_fw_entry(const struct firmware *fw,
3642                     const struct bnx2_mips_fw_file_entry *entry)
3643 {
3644         if (check_fw_section(fw, &entry->text, 4, true) ||
3645             check_fw_section(fw, &entry->data, 4, false) ||
3646             check_fw_section(fw, &entry->rodata, 4, false))
3647                 return -EINVAL;
3648         return 0;
3649 }
3650
3651 static int __devinit
3652 bnx2_request_firmware(struct bnx2 *bp)
3653 {
3654         const char *mips_fw_file, *rv2p_fw_file;
3655         const struct bnx2_mips_fw_file *mips_fw;
3656         const struct bnx2_rv2p_fw_file *rv2p_fw;
3657         int rc;
3658
3659         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3660                 mips_fw_file = FW_MIPS_FILE_09;
3661                 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3662                     (CHIP_ID(bp) == CHIP_ID_5709_A1))
3663                         rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3664                 else
3665                         rv2p_fw_file = FW_RV2P_FILE_09;
3666         } else {
3667                 mips_fw_file = FW_MIPS_FILE_06;
3668                 rv2p_fw_file = FW_RV2P_FILE_06;
3669         }
3670
3671         rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3672         if (rc) {
3673                 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3674                 return rc;
3675         }
3676
3677         rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3678         if (rc) {
3679                 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3680                 return rc;
3681         }
3682         mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3683         rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3684         if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3685             check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3686             check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3687             check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3688             check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3689             check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3690                 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3691                 return -EINVAL;
3692         }
3693         if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3694             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3695             check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3696                 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3697                 return -EINVAL;
3698         }
3699
3700         return 0;
3701 }
3702
3703 static u32
3704 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3705 {
3706         switch (idx) {
3707         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3708                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3709                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3710                 break;
3711         }
3712         return rv2p_code;
3713 }
3714
/* Download one RV2P processor image.  Each 64-bit instruction is
 * written as a high/low register pair and committed to the internal
 * instruction RAM through the processor's address/command register.
 * Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the address/command register pair for this proc. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write the image, one 8-byte instruction at a time. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		/* Commit at instruction index i/8. */
		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply up to 8 fixups: re-write individual instructions with
	 * values patched at run time (e.g. the rx BD page size, see
	 * rv2p_fw_fixup()).  A fixup location of 0 means unused.
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3774
3775 static int
3776 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3777             const struct bnx2_mips_fw_file_entry *fw_entry)
3778 {
3779         u32 addr, len, file_offset;
3780         __be32 *data;
3781         u32 offset;
3782         u32 val;
3783
3784         /* Halt the CPU. */
3785         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3786         val |= cpu_reg->mode_value_halt;
3787         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3788         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3789
3790         /* Load the Text area. */
3791         addr = be32_to_cpu(fw_entry->text.addr);
3792         len = be32_to_cpu(fw_entry->text.len);
3793         file_offset = be32_to_cpu(fw_entry->text.offset);
3794         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3795
3796         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3797         if (len) {
3798                 int j;
3799
3800                 for (j = 0; j < (len / 4); j++, offset += 4)
3801                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3802         }
3803
3804         /* Load the Data area. */
3805         addr = be32_to_cpu(fw_entry->data.addr);
3806         len = be32_to_cpu(fw_entry->data.len);
3807         file_offset = be32_to_cpu(fw_entry->data.offset);
3808         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3809
3810         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3811         if (len) {
3812                 int j;
3813
3814                 for (j = 0; j < (len / 4); j++, offset += 4)
3815                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3816         }
3817
3818         /* Load the Read-Only area. */
3819         addr = be32_to_cpu(fw_entry->rodata.addr);
3820         len = be32_to_cpu(fw_entry->rodata.len);
3821         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3822         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3823
3824         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3825         if (len) {
3826                 int j;
3827
3828                 for (j = 0; j < (len / 4); j++, offset += 4)
3829                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3830         }
3831
3832         /* Clear the pre-fetch instruction. */
3833         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3834
3835         val = be32_to_cpu(fw_entry->start_addr);
3836         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3837
3838         /* Start the CPU. */
3839         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3840         val &= ~cpu_reg->mode_value_halt;
3841         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3842         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3843
3844         return 0;
3845 }
3846
3847 static int
3848 bnx2_init_cpus(struct bnx2 *bp)
3849 {
3850         const struct bnx2_mips_fw_file *mips_fw =
3851                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3852         const struct bnx2_rv2p_fw_file *rv2p_fw =
3853                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3854         int rc;
3855
3856         /* Initialize the RV2P processor. */
3857         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3858         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3859
3860         /* Initialize the RX Processor. */
3861         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3862         if (rc)
3863                 goto init_cpu_err;
3864
3865         /* Initialize the TX Processor. */
3866         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3867         if (rc)
3868                 goto init_cpu_err;
3869
3870         /* Initialize the TX Patch-up Processor. */
3871         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3872         if (rc)
3873                 goto init_cpu_err;
3874
3875         /* Initialize the Completion Processor. */
3876         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3877         if (rc)
3878                 goto init_cpu_err;
3879
3880         /* Initialize the Command Processor. */
3881         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3882
3883 init_cpu_err:
3884         return rc;
3885 }
3886
/* Transition the device between PCI power states.  Only D0 and
 * D3hot are supported; anything else returns -EINVAL.  For D3hot
 * with WoL enabled, the MAC is first configured to receive wake-up
 * (magic/ACPI) packets.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear received wake-packet indications and disable
		 * magic-packet mode now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save the user-configured link settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* On copper ports advertise only 10/100 while
			 * suspended (presumably to reduce power -
			 * TODO confirm against HW docs).
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			/* Restore the saved settings. */
			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort on broadcast + multicast while asleep:
			 * clear, load, then enable.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1: only enter D3hot when WoL is
			 * enabled; otherwise leave the state bits 0.
			 */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			/* Request D3hot (state field value 3). */
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4024
4025 static int
4026 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4027 {
4028         u32 val;
4029         int j;
4030
4031         /* Request access to the flash interface. */
4032         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4033         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4034                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4035                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4036                         break;
4037
4038                 udelay(5);
4039         }
4040
4041         if (j >= NVRAM_TIMEOUT_COUNT)
4042                 return -EBUSY;
4043
4044         return 0;
4045 }
4046
4047 static int
4048 bnx2_release_nvram_lock(struct bnx2 *bp)
4049 {
4050         int j;
4051         u32 val;
4052
4053         /* Relinquish nvram interface. */
4054         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4055
4056         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4057                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4058                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4059                         break;
4060
4061                 udelay(5);
4062         }
4063
4064         if (j >= NVRAM_TIMEOUT_COUNT)
4065                 return -EBUSY;
4066
4067         return 0;
4068 }
4069
4070
4071 static int
4072 bnx2_enable_nvram_write(struct bnx2 *bp)
4073 {
4074         u32 val;
4075
4076         val = REG_RD(bp, BNX2_MISC_CFG);
4077         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4078
4079         if (bp->flash_info->flags & BNX2_NV_WREN) {
4080                 int j;
4081
4082                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4083                 REG_WR(bp, BNX2_NVM_COMMAND,
4084                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4085
4086                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4087                         udelay(5);
4088
4089                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4090                         if (val & BNX2_NVM_COMMAND_DONE)
4091                                 break;
4092                 }
4093
4094                 if (j >= NVRAM_TIMEOUT_COUNT)
4095                         return -EBUSY;
4096         }
4097         return 0;
4098 }
4099
4100 static void
4101 bnx2_disable_nvram_write(struct bnx2 *bp)
4102 {
4103         u32 val;
4104
4105         val = REG_RD(bp, BNX2_MISC_CFG);
4106         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4107 }
4108
4109
4110 static void
4111 bnx2_enable_nvram_access(struct bnx2 *bp)
4112 {
4113         u32 val;
4114
4115         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4116         /* Enable both bits, even on read. */
4117         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4118                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4119 }
4120
4121 static void
4122 bnx2_disable_nvram_access(struct bnx2 *bp)
4123 {
4124         u32 val;
4125
4126         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4127         /* Disable both bits, even after read. */
4128         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4129                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4130                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4131 }
4132
4133 static int
4134 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4135 {
4136         u32 cmd;
4137         int j;
4138
4139         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4140                 /* Buffered flash, no erase needed */
4141                 return 0;
4142
4143         /* Build an erase command */
4144         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4145               BNX2_NVM_COMMAND_DOIT;
4146
4147         /* Need to clear DONE bit separately. */
4148         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4149
4150         /* Address of the NVRAM to read from. */
4151         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4152
4153         /* Issue an erase command. */
4154         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4155
4156         /* Wait for completion. */
4157         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4158                 u32 val;
4159
4160                 udelay(5);
4161
4162                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4163                 if (val & BNX2_NVM_COMMAND_DONE)
4164                         break;
4165         }
4166
4167         if (j >= NVRAM_TIMEOUT_COUNT)
4168                 return -EBUSY;
4169
4170         return 0;
4171 }
4172
/* Read one aligned 32-bit word from NVRAM.
 *
 * @offset:    linear byte offset within the flash (translated below to
 *             page/byte form for page-addressed parts)
 * @ret_val:   destination for the 4 bytes read, stored in flash
 *             (big-endian) byte order
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits for multi-word
 *             transactions
 *
 * Caller must hold the NVRAM lock and have NVRAM access enabled.
 * Returns 0 on success or -EBUSY if the controller does not signal
 * completion within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
        u32 cmd;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

        /* Calculate an offset of a buffered flash, not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                           bp->flash_info->page_bits) +
                          (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* Address of the NVRAM to read from. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue a read command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                u32 val;

                udelay(5);

                val = REG_RD(bp, BNX2_NVM_COMMAND);
                if (val & BNX2_NVM_COMMAND_DONE) {
                        /* Round-trip through __be32 + memcpy so the
                         * caller sees flash byte order regardless of
                         * host endianness. */
                        __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
                        memcpy(ret_val, &v, 4);
                        break;
                }
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4216
4217
/* Write one aligned 32-bit word to NVRAM.
 *
 * @offset:    linear byte offset within the flash (translated below for
 *             page-addressed parts)
 * @val:       the 4 bytes to write, in flash (big-endian) byte order
 * @cmd_flags: BNX2_NVM_COMMAND_FIRST/LAST framing bits
 *
 * Caller must hold the NVRAM lock and have both NVRAM access and
 * NVRAM writes enabled.  Returns 0 or -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
        u32 cmd;
        __be32 val32;
        int j;

        /* Build the command word. */
        cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

        /* Calculate an offset of a buffered flash, not needed for 5709. */
        if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
                offset = ((offset / bp->flash_info->page_size) <<
                          bp->flash_info->page_bits) +
                         (offset % bp->flash_info->page_size);
        }

        /* Need to clear DONE bit separately. */
        REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

        /* memcpy + be32_to_cpu preserves flash byte order on any host. */
        memcpy(&val32, val, 4);

        /* Write the data. */
        REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

        /* Address of the NVRAM to write to. */
        REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

        /* Issue the write command. */
        REG_WR(bp, BNX2_NVM_COMMAND, cmd);

        /* Wait for completion. */
        for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
                udelay(5);

                if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
                        break;
        }
        if (j >= NVRAM_TIMEOUT_COUNT)
                return -EBUSY;

        return 0;
}
4261
/* Identify the attached flash/EEPROM part and record it in
 * bp->flash_info, then determine the usable flash size.
 *
 * The 5709 has a fixed flash description.  Other chips are matched
 * against flash_table using the NVM strapping bits in NVM_CFG1; if the
 * interface has not yet been reconfigured, the matching part's timing
 * values are programmed into the NVM config registers under the NVRAM
 * lock.  Returns 0 on success, -ENODEV for an unrecognized part, or
 * an error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
        u32 val;
        int j, entry_count, rc = 0;
        const struct flash_spec *flash;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                bp->flash_info = &flash_5709;
                goto get_flash_size;
        }

        /* Determine the selected interface. */
        val = REG_RD(bp, BNX2_NVM_CFG1);

        entry_count = ARRAY_SIZE(flash_table);

        if (val & 0x40000000) {

                /* Flash interface has been reconfigured */
                for (j = 0, flash = &flash_table[0]; j < entry_count;
                     j++, flash++) {
                        if ((val & FLASH_BACKUP_STRAP_MASK) ==
                            (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
                                bp->flash_info = flash;
                                break;
                        }
                }
        }
        else {
                u32 mask;
                /* Not yet been reconfigured */

                /* Bit 23 selects which strap field is valid. */
                if (val & (1 << 23))
                        mask = FLASH_BACKUP_STRAP_MASK;
                else
                        mask = FLASH_STRAP_MASK;

                for (j = 0, flash = &flash_table[0]; j < entry_count;
                        j++, flash++) {

                        if ((val & mask) == (flash->strapping & mask)) {
                                bp->flash_info = flash;

                                /* Request access to the flash interface. */
                                if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
                                        return rc;

                                /* Enable access to flash interface */
                                bnx2_enable_nvram_access(bp);

                                /* Reconfigure the flash interface */
                                REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
                                REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
                                REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
                                REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

                                /* Disable access to flash interface */
                                bnx2_disable_nvram_access(bp);
                                bnx2_release_nvram_lock(bp);

                                break;
                        }
                }
        } /* if (val & 0x40000000) */

        /* Neither loop matched: unknown part. */
        if (j == entry_count) {
                bp->flash_info = NULL;
                pr_alert("Unknown flash/EEPROM type\n");
                return -ENODEV;
        }

get_flash_size:
        /* Prefer the size reported by firmware in shared memory; fall
         * back to the flash table's total size when it is absent. */
        val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
        val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
        if (val)
                bp->flash_size = val;
        else
                bp->flash_size = bp->flash_info->total_size;

        return rc;
}
4344
4345 static int
4346 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4347                 int buf_size)
4348 {
4349         int rc = 0;
4350         u32 cmd_flags, offset32, len32, extra;
4351
4352         if (buf_size == 0)
4353                 return 0;
4354
4355         /* Request access to the flash interface. */
4356         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4357                 return rc;
4358
4359         /* Enable access to flash interface */
4360         bnx2_enable_nvram_access(bp);
4361
4362         len32 = buf_size;
4363         offset32 = offset;
4364         extra = 0;
4365
4366         cmd_flags = 0;
4367
4368         if (offset32 & 3) {
4369                 u8 buf[4];
4370                 u32 pre_len;
4371
4372                 offset32 &= ~3;
4373                 pre_len = 4 - (offset & 3);
4374
4375                 if (pre_len >= len32) {
4376                         pre_len = len32;
4377                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4378                                     BNX2_NVM_COMMAND_LAST;
4379                 }
4380                 else {
4381                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4382                 }
4383
4384                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4385
4386                 if (rc)
4387                         return rc;
4388
4389                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4390
4391                 offset32 += 4;
4392                 ret_buf += pre_len;
4393                 len32 -= pre_len;
4394         }
4395         if (len32 & 3) {
4396                 extra = 4 - (len32 & 3);
4397                 len32 = (len32 + 4) & ~3;
4398         }
4399
4400         if (len32 == 4) {
4401                 u8 buf[4];
4402
4403                 if (cmd_flags)
4404                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4405                 else
4406                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4407                                     BNX2_NVM_COMMAND_LAST;
4408
4409                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4410
4411                 memcpy(ret_buf, buf, 4 - extra);
4412         }
4413         else if (len32 > 0) {
4414                 u8 buf[4];
4415
4416                 /* Read the first word. */
4417                 if (cmd_flags)
4418                         cmd_flags = 0;
4419                 else
4420                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4421
4422                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4423
4424                 /* Advance to the next dword. */
4425                 offset32 += 4;
4426                 ret_buf += 4;
4427                 len32 -= 4;
4428
4429                 while (len32 > 4 && rc == 0) {
4430                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4431
4432                         /* Advance to the next dword. */
4433                         offset32 += 4;
4434                         ret_buf += 4;
4435                         len32 -= 4;
4436                 }
4437
4438                 if (rc)
4439                         return rc;
4440
4441                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4442                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4443
4444                 memcpy(ret_buf, buf, 4 - extra);
4445         }
4446
4447         /* Disable access to flash interface */
4448         bnx2_disable_nvram_access(bp);
4449
4450         bnx2_release_nvram_lock(bp);
4451
4452         return rc;
4453 }
4454
4455 static int
4456 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4457                 int buf_size)
4458 {
4459         u32 written, offset32, len32;
4460         u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4461         int rc = 0;
4462         int align_start, align_end;
4463
4464         buf = data_buf;
4465         offset32 = offset;
4466         len32 = buf_size;
4467         align_start = align_end = 0;
4468
4469         if ((align_start = (offset32 & 3))) {
4470                 offset32 &= ~3;
4471                 len32 += align_start;
4472                 if (len32 < 4)
4473                         len32 = 4;
4474                 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4475                         return rc;
4476         }
4477
4478         if (len32 & 3) {
4479                 align_end = 4 - (len32 & 3);
4480                 len32 += align_end;
4481                 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4482                         return rc;
4483         }
4484
4485         if (align_start || align_end) {
4486                 align_buf = kmalloc(len32, GFP_KERNEL);
4487                 if (align_buf == NULL)
4488                         return -ENOMEM;
4489                 if (align_start) {
4490                         memcpy(align_buf, start, 4);
4491                 }
4492                 if (align_end) {
4493                         memcpy(align_buf + len32 - 4, end, 4);
4494                 }
4495                 memcpy(align_buf + align_start, data_buf, buf_size);
4496                 buf = align_buf;
4497         }
4498
4499         if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4500                 flash_buffer = kmalloc(264, GFP_KERNEL);
4501                 if (flash_buffer == NULL) {
4502                         rc = -ENOMEM;
4503                         goto nvram_write_end;
4504                 }
4505         }
4506
4507         written = 0;
4508         while ((written < len32) && (rc == 0)) {
4509                 u32 page_start, page_end, data_start, data_end;
4510                 u32 addr, cmd_flags;
4511                 int i;
4512
4513                 /* Find the page_start addr */
4514                 page_start = offset32 + written;
4515                 page_start -= (page_start % bp->flash_info->page_size);
4516                 /* Find the page_end addr */
4517                 page_end = page_start + bp->flash_info->page_size;
4518                 /* Find the data_start addr */
4519                 data_start = (written == 0) ? offset32 : page_start;
4520                 /* Find the data_end addr */
4521                 data_end = (page_end > offset32 + len32) ?
4522                         (offset32 + len32) : page_end;
4523
4524                 /* Request access to the flash interface. */
4525                 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4526                         goto nvram_write_end;
4527
4528                 /* Enable access to flash interface */
4529                 bnx2_enable_nvram_access(bp);
4530
4531                 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4532                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4533                         int j;
4534
4535                         /* Read the whole page into the buffer
4536                          * (non-buffer flash only) */
4537                         for (j = 0; j < bp->flash_info->page_size; j += 4) {
4538                                 if (j == (bp->flash_info->page_size - 4)) {
4539                                         cmd_flags |= BNX2_NVM_COMMAND_LAST;
4540                                 }
4541                                 rc = bnx2_nvram_read_dword(bp,
4542                                         page_start + j,
4543                                         &flash_buffer[j],
4544                                         cmd_flags);
4545
4546                                 if (rc)
4547                                         goto nvram_write_end;
4548
4549                                 cmd_flags = 0;
4550                         }
4551                 }
4552
4553                 /* Enable writes to flash interface (unlock write-protect) */
4554                 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4555                         goto nvram_write_end;
4556
4557                 /* Loop to write back the buffer data from page_start to
4558                  * data_start */
4559                 i = 0;
4560                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4561                         /* Erase the page */
4562                         if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4563                                 goto nvram_write_end;
4564
4565                         /* Re-enable the write again for the actual write */
4566                         bnx2_enable_nvram_write(bp);
4567
4568                         for (addr = page_start; addr < data_start;
4569                                 addr += 4, i += 4) {
4570
4571                                 rc = bnx2_nvram_write_dword(bp, addr,
4572                                         &flash_buffer[i], cmd_flags);
4573
4574                                 if (rc != 0)
4575                                         goto nvram_write_end;
4576
4577                                 cmd_flags = 0;
4578                         }
4579                 }
4580
4581                 /* Loop to write the new data from data_start to data_end */
4582                 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4583                         if ((addr == page_end - 4) ||
4584                                 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4585                                  (addr == data_end - 4))) {
4586
4587                                 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4588                         }
4589                         rc = bnx2_nvram_write_dword(bp, addr, buf,
4590                                 cmd_flags);
4591
4592                         if (rc != 0)
4593                                 goto nvram_write_end;
4594
4595                         cmd_flags = 0;
4596                         buf += 4;
4597                 }
4598
4599                 /* Loop to write back the buffer data from data_end
4600                  * to page_end */
4601                 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4602                         for (addr = data_end; addr < page_end;
4603                                 addr += 4, i += 4) {
4604
4605                                 if (addr == page_end-4) {
4606                                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4607                                 }
4608                                 rc = bnx2_nvram_write_dword(bp, addr,
4609                                         &flash_buffer[i], cmd_flags);
4610
4611                                 if (rc != 0)
4612                                         goto nvram_write_end;
4613
4614                                 cmd_flags = 0;
4615                         }
4616                 }
4617
4618                 /* Disable writes to flash interface (lock write-protect) */
4619                 bnx2_disable_nvram_write(bp);
4620
4621                 /* Disable access to flash interface */
4622                 bnx2_disable_nvram_access(bp);
4623                 bnx2_release_nvram_lock(bp);
4624
4625                 /* Increment written */
4626                 written += data_end - data_start;
4627         }
4628
4629 nvram_write_end:
4630         kfree(flash_buffer);
4631         kfree(align_buf);
4632         return rc;
4633 }
4634
4635 static void
4636 bnx2_init_fw_cap(struct bnx2 *bp)
4637 {
4638         u32 val, sig = 0;
4639
4640         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4641         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4642
4643         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4644                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4645
4646         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4647         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4648                 return;
4649
4650         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4651                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4652                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4653         }
4654
4655         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4656             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4657                 u32 link;
4658
4659                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4660
4661                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4662                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4663                         bp->phy_port = PORT_FIBRE;
4664                 else
4665                         bp->phy_port = PORT_TP;
4666
4667                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4668                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4669         }
4670
4671         if (netif_running(bp->dev) && sig)
4672                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4673 }
4674
/* Program GRC windows 2 and 3 to map the MSI-X vector table and the
 * MSI-X PBA so they are reachable through the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
        /* Switch the GRC window into separate sub-window mode first. */
        REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

        REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
        REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4683
/* Soft-reset the chip and re-synchronize with the bootcode firmware.
 *
 * @reset_code: driver message code passed to the firmware so it knows
 *              the reason for the reset.
 *
 * Returns 0 on success, or a negative errno if the reset does not
 * complete, the chip comes up in the wrong endian mode, or the
 * firmware handshake fails.  The sequence below is strictly ordered:
 * quiesce DMA, handshake with firmware, reset, then re-validate.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
        u32 val;
        int i, rc = 0;
        u8 old_port;

        /* Wait for the current PCI transaction to complete before
         * issuing a reset. */
        REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
               BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
               BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
        val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
        udelay(5);

        /* Wait for the firmware to tell us it is ok to issue a reset. */
        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

        /* Deposit a driver reset signature so the firmware knows that
         * this is a soft reset. */
        bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
                      BNX2_DRV_RESET_SIGNATURE_MAGIC);

        /* Do a dummy read to force the chip to complete all current transaction
         * before we issue a reset. */
        val = REG_RD(bp, BNX2_MISC_ID);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                /* 5709 resets through the MISC command register; the
                 * config write below re-enables the register window and
                 * mailbox word swap afterwards. */
                REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
                REG_RD(bp, BNX2_MISC_COMMAND);
                udelay(5);

                val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

        } else {
                val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
                      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

                /* Chip reset. */
                REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

                /* Reading back any register after chip reset will hang the
                 * bus on 5706 A0 and A1.  The msleep below provides plenty
                 * of margin for write posting.
                 */
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1))
                        msleep(20);

                /* Reset takes approximate 30 usec */
                for (i = 0; i < 10; i++) {
                        val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
                        if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                                    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
                                break;
                        udelay(10);
                }

                if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
                           BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
                        pr_err("Chip reset did not complete\n");
                        return -EBUSY;
                }
        }

        /* Make sure byte swapping is properly configured. */
        val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
        if (val != 0x01020304) {
                pr_err("Chip not in correct endian mode\n");
                return -ENODEV;
        }

        /* Wait for the firmware to finish its initialization. */
        rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
        if (rc)
                return rc;

        /* Re-read firmware capabilities under the PHY lock; if the
         * remote-PHY port type changed across the reset, reprogram the
         * default remote link. */
        spin_lock_bh(&bp->phy_lock);
        old_port = bp->phy_port;
        bnx2_init_fw_cap(bp);
        if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
            old_port != bp->phy_port)
                bnx2_set_default_remote_link(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                /* Adjust the voltage regular to two steps lower.  The default
                 * of this register is 0x0000000e. */
                REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

                /* Remove bad rbuf memory from the free pool. */
                rc = bnx2_alloc_bad_rbuf(bp);
        }

        if (bp->flags & BNX2_FLAG_USING_MSIX) {
                bnx2_setup_msix_tbl(bp);
                /* Prevent MSIX table reads and write from timing out */
                REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
                        BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
        }

        return rc;
}
4793
4794 static int
4795 bnx2_init_chip(struct bnx2 *bp)
4796 {
4797         u32 val, mtu;
4798         int rc, i;
4799
4800         /* Make sure the interrupt is not active. */
4801         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4802
4803         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4804               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4805 #ifdef __BIG_ENDIAN
4806               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4807 #endif
4808               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4809               DMA_READ_CHANS << 12 |
4810               DMA_WRITE_CHANS << 16;
4811
4812         val |= (0x2 << 20) | (1 << 11);
4813
4814         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4815                 val |= (1 << 23);
4816
4817         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4818             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4819                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4820
4821         REG_WR(bp, BNX2_DMA_CONFIG, val);
4822
4823         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4824                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4825                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4826                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4827         }
4828
4829         if (bp->flags & BNX2_FLAG_PCIX) {
4830                 u16 val16;
4831
4832                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4833                                      &val16);
4834                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4835                                       val16 & ~PCI_X_CMD_ERO);
4836         }
4837
4838         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4839                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4840                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4841                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4842
4843         /* Initialize context mapping and zero out the quick contexts.  The
4844          * context block must have already been enabled. */
4845         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4846                 rc = bnx2_init_5709_context(bp);
4847                 if (rc)
4848                         return rc;
4849         } else
4850                 bnx2_init_context(bp);
4851
4852         if ((rc = bnx2_init_cpus(bp)) != 0)
4853                 return rc;
4854
4855         bnx2_init_nvram(bp);
4856
4857         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4858
4859         val = REG_RD(bp, BNX2_MQ_CONFIG);
4860         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4861         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4862         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4863                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4864                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4865                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4866         }
4867
4868         REG_WR(bp, BNX2_MQ_CONFIG, val);
4869
4870         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4871         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4872         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4873
4874         val = (BCM_PAGE_BITS - 8) << 24;
4875         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4876
4877         /* Configure page size. */
4878         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4879         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4880         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4881         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4882
4883         val = bp->mac_addr[0] +
4884               (bp->mac_addr[1] << 8) +
4885               (bp->mac_addr[2] << 16) +
4886               bp->mac_addr[3] +
4887               (bp->mac_addr[4] << 8) +
4888               (bp->mac_addr[5] << 16);
4889         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4890
4891         /* Program the MTU.  Also include 4 bytes for CRC32. */
4892         mtu = bp->dev->mtu;
4893         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4894         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4895                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4896         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4897
4898         if (mtu < 1500)
4899                 mtu = 1500;
4900
4901         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4902         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4903         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4904
4905         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4906         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4907                 bp->bnx2_napi[i].last_status_idx = 0;
4908
4909         bp->idle_chk_status_idx = 0xffff;
4910
4911         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4912
4913         /* Set up how to generate a link change interrupt. */
4914         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4915
4916         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4917                (u64) bp->status_blk_mapping & 0xffffffff);
4918         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4919
4920         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4921                (u64) bp->stats_blk_mapping & 0xffffffff);
4922         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4923                (u64) bp->stats_blk_mapping >> 32);
4924
4925         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4926                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4927
4928         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4929                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4930
4931         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4932                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4933
4934         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4935
4936         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4937
4938         REG_WR(bp, BNX2_HC_COM_TICKS,
4939                (bp->com_ticks_int << 16) | bp->com_ticks);
4940
4941         REG_WR(bp, BNX2_HC_CMD_TICKS,
4942                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4943
4944         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4945                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4946         else
4947                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4948         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4949
4950         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4951                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4952         else {
4953                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4954                       BNX2_HC_CONFIG_COLLECT_STATS;
4955         }
4956
4957         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4958                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4959                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4960
4961                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4962         }
4963
4964         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4965                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4966
4967         REG_WR(bp, BNX2_HC_CONFIG, val);
4968
4969         for (i = 1; i < bp->irq_nvecs; i++) {
4970                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4971                            BNX2_HC_SB_CONFIG_1;
4972
4973                 REG_WR(bp, base,
4974                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4975                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4976                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4977
4978                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4979                         (bp->tx_quick_cons_trip_int << 16) |
4980                          bp->tx_quick_cons_trip);
4981
4982                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4983                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4984
4985                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4986                        (bp->rx_quick_cons_trip_int << 16) |
4987                         bp->rx_quick_cons_trip);
4988
4989                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4990                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4991         }
4992
4993         /* Clear internal stats counters. */
4994         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4995
4996         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4997
4998         /* Initialize the receive filter. */
4999         bnx2_set_rx_mode(bp->dev);
5000
5001         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5002                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5003                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5004                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5005         }
5006         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5007                           1, 0);
5008
5009         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5010         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5011
5012         udelay(20);
5013
5014         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5015
5016         return rc;
5017 }
5018
5019 static void
5020 bnx2_clear_ring_states(struct bnx2 *bp)
5021 {
5022         struct bnx2_napi *bnapi;
5023         struct bnx2_tx_ring_info *txr;
5024         struct bnx2_rx_ring_info *rxr;
5025         int i;
5026
5027         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5028                 bnapi = &bp->bnx2_napi[i];
5029                 txr = &bnapi->tx_ring;
5030                 rxr = &bnapi->rx_ring;
5031
5032                 txr->tx_cons = 0;
5033                 txr->hw_tx_cons = 0;
5034                 rxr->rx_prod_bseq = 0;
5035                 rxr->rx_prod = 0;
5036                 rxr->rx_cons = 0;
5037                 rxr->rx_pg_prod = 0;
5038                 rxr->rx_pg_cons = 0;
5039         }
5040 }
5041
5042 static void
5043 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5044 {
5045         u32 val, offset0, offset1, offset2, offset3;
5046         u32 cid_addr = GET_CID_ADDR(cid);
5047
5048         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5049                 offset0 = BNX2_L2CTX_TYPE_XI;
5050                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5051                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5052                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5053         } else {
5054                 offset0 = BNX2_L2CTX_TYPE;
5055                 offset1 = BNX2_L2CTX_CMD_TYPE;
5056                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5057                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5058         }
5059         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5060         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5061
5062         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5063         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5064
5065         val = (u64) txr->tx_desc_mapping >> 32;
5066         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5067
5068         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5069         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5070 }
5071
5072 static void
5073 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5074 {
5075         struct tx_bd *txbd;
5076         u32 cid = TX_CID;
5077         struct bnx2_napi *bnapi;
5078         struct bnx2_tx_ring_info *txr;
5079
5080         bnapi = &bp->bnx2_napi[ring_num];
5081         txr = &bnapi->tx_ring;
5082
5083         if (ring_num == 0)
5084                 cid = TX_CID;
5085         else
5086                 cid = TX_TSS_CID + ring_num - 1;
5087
5088         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5089
5090         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5091
5092         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5093         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5094
5095         txr->tx_prod = 0;
5096         txr->tx_prod_bseq = 0;
5097
5098         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5099         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5100
5101         bnx2_init_tx_context(bp, cid, txr);
5102 }
5103
5104 static void
5105 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5106                      int num_rings)
5107 {
5108         int i;
5109         struct rx_bd *rxbd;
5110
5111         for (i = 0; i < num_rings; i++) {
5112                 int j;
5113
5114                 rxbd = &rx_ring[i][0];
5115                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5116                         rxbd->rx_bd_len = buf_size;
5117                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5118                 }
5119                 if (i == (num_rings - 1))
5120                         j = 0;
5121                 else
5122                         j = i + 1;
5123                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5124                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5125         }
5126 }
5127
/* Set up one host RX ring (and its optional page ring for jumbo
 * frames): build the BD chains, program the chip's L2 RX context,
 * pre-fill the rings with SKBs/pages, and publish the initial
 * producer indices through the ring's mailbox registers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
        int i;
        u16 prod, ring_prod;
        u32 cid, rx_cid_addr, val;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        /* Ring 0 uses the base RX CID; RSS rings follow consecutively. */
        if (ring_num == 0)
                cid = RX_CID;
        else
                cid = RX_RSS_CID + ring_num - 1;

        rx_cid_addr = GET_CID_ADDR(cid);

        bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
                             bp->rx_buf_use_size, bp->rx_max_ring);

        bnx2_init_rx_context(bp, cid);

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
                REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
        }

        /* Disable the page ring by default; re-enabled below if used. */
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
        if (bp->rx_pg_ring_size) {
                /* Jumbo frames: set up the page (fragment) BD chain. */
                bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
                                     rxr->rx_pg_desc_mapping,
                                     PAGE_SIZE, bp->rx_max_pg_ring);
                val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
                       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

                val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

                val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
                bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

                if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
        }

        /* Tell the chip where the main RX BD chain starts. */
        val = (u64) rxr->rx_desc_mapping[0] >> 32;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

        val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
        bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

        /* Pre-allocate pages for the page ring; a partial fill is
         * tolerated with a warning.
         */
        ring_prod = prod = rxr->rx_pg_prod;
        for (i = 0; i < bp->rx_pg_ring_size; i++) {
                if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
                        netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
                                    ring_num, i, bp->rx_pg_ring_size);
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_PG_RING_IDX(prod);
        }
        rxr->rx_pg_prod = prod;

        /* Pre-allocate receive SKBs for the main ring, same policy. */
        ring_prod = prod = rxr->rx_prod;
        for (i = 0; i < bp->rx_ring_size; i++) {
                if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
                        netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
                                    ring_num, i, bp->rx_ring_size);
                        break;
                }
                prod = NEXT_RX_BD(prod);
                ring_prod = RX_RING_IDX(prod);
        }
        rxr->rx_prod = prod;

        /* Mailbox (doorbell) addresses for this ring. */
        rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
        rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
        rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

        /* Publish the initial producer indices to the hardware. */
        REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
        REG_WR16(bp, rxr->rx_bidx_addr, prod);

        REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5213
/* Initialize every TX and RX ring and, when multiple rings are in use,
 * program the TSS configuration and the RSS indirection table / hash
 * configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
        int i;
        u32 val;

        bnx2_clear_ring_states(bp);

        REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
        for (i = 0; i < bp->num_tx_rings; i++)
                bnx2_init_tx_ring(bp, i);

        /* Enable TSS when extra TX rings are configured. */
        if (bp->num_tx_rings > 1)
                REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
                       (TX_TSS_CID << 7));

        REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
        bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

        for (i = 0; i < bp->num_rx_rings; i++)
                bnx2_init_rx_ring(bp, i);

        if (bp->num_rx_rings > 1) {
                u32 tbl_32;
                u8 *tbl = (u8 *) &tbl_32;

                bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
                                BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

                /* Fill the RSS indirection table: entry i selects one of
                 * the non-default RX rings.  Entries are packed four to a
                 * 32-bit word (via the tbl byte alias) and flushed to the
                 * RXP scratch area every fourth iteration.
                 */
                for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
                        tbl[i % 4] = i % (bp->num_rx_rings - 1);
                        if ((i % 4) == 3)
                                bnx2_reg_wr_ind(bp,
                                                BNX2_RXP_SCRATCH_RSS_TBL + i,
                                                cpu_to_be32(tbl_32));
                }

                val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
                      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

                REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

        }
}
5258
5259 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5260 {
5261         u32 max, num_rings = 1;
5262
5263         while (ring_size > MAX_RX_DESC_CNT) {
5264                 ring_size -= MAX_RX_DESC_CNT;
5265                 num_rings++;
5266         }
5267         /* round to next power of 2 */
5268         max = max_size;
5269         while ((max & num_rings) == 0)
5270                 max >>= 1;
5271
5272         if (num_rings != max)
5273                 max <<= 1;
5274
5275         return max;
5276 }
5277
5278 static void
5279 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5280 {
5281         u32 rx_size, rx_space, jumbo_size;
5282
5283         /* 8 for CRC and VLAN */
5284         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5285
5286         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5287                 sizeof(struct skb_shared_info);
5288
5289         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5290         bp->rx_pg_ring_size = 0;
5291         bp->rx_max_pg_ring = 0;
5292         bp->rx_max_pg_ring_idx = 0;
5293         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5294                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5295
5296                 jumbo_size = size * pages;
5297                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5298                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5299
5300                 bp->rx_pg_ring_size = jumbo_size;
5301                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5302                                                         MAX_RX_PG_RINGS);
5303                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5304                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5305                 bp->rx_copy_thresh = 0;
5306         }
5307
5308         bp->rx_buf_use_size = rx_size;
5309         /* hw alignment */
5310         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5311         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5312         bp->rx_ring_size = size;
5313         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5314         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5315 }
5316
/* Unmap and free every pending TX SKB on all TX rings.  Used when the
 * rings are torn down or reinitialized.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
        int i;

        for (i = 0; i < bp->num_tx_rings; i++) {
                struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
                struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
                int j;

                if (txr->tx_buf_ring == NULL)
                        continue;

                /* j advances inside the loop body: once past the head BD
                 * and once per fragment BD of the same SKB.
                 */
                for (j = 0; j < TX_DESC_CNT; ) {
                        struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
                        struct sk_buff *skb = tx_buf->skb;
                        int k, last;

                        if (skb == NULL) {
                                j++;
                                continue;
                        }

                        /* Unmap the linear (head) part of the SKB. */
                        pci_unmap_single(bp->pdev,
                                         dma_unmap_addr(tx_buf, mapping),
                                         skb_headlen(skb),
                                         PCI_DMA_TODEVICE);

                        tx_buf->skb = NULL;

                        /* Then unmap each paged fragment's BD. */
                        last = tx_buf->nr_frags;
                        j++;
                        for (k = 0; k < last; k++, j++) {
                                tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
                                pci_unmap_page(bp->pdev,
                                        dma_unmap_addr(tx_buf, mapping),
                                        skb_shinfo(skb)->frags[k].size,
                                        PCI_DMA_TODEVICE);
                        }
                        dev_kfree_skb(skb);
                }
        }
}
5360
5361 static void
5362 bnx2_free_rx_skbs(struct bnx2 *bp)
5363 {
5364         int i;
5365
5366         for (i = 0; i < bp->num_rx_rings; i++) {
5367                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5368                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5369                 int j;
5370
5371                 if (rxr->rx_buf_ring == NULL)
5372                         return;
5373
5374                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5375                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5376                         struct sk_buff *skb = rx_buf->skb;
5377
5378                         if (skb == NULL)
5379                                 continue;
5380
5381                         pci_unmap_single(bp->pdev,
5382                                          dma_unmap_addr(rx_buf, mapping),
5383                                          bp->rx_buf_use_size,
5384                                          PCI_DMA_FROMDEVICE);
5385
5386                         rx_buf->skb = NULL;
5387
5388                         dev_kfree_skb(skb);
5389                 }
5390                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5391                         bnx2_free_rx_page(bp, rxr, j);
5392         }
5393 }
5394
/* Release all SKBs held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
        bnx2_free_tx_skbs(bp);
        bnx2_free_rx_skbs(bp);
}
5401
5402 static int
5403 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5404 {
5405         int rc;
5406
5407         rc = bnx2_reset_chip(bp, reset_code);
5408         bnx2_free_skbs(bp);
5409         if (rc)
5410                 return rc;
5411
5412         if ((rc = bnx2_init_chip(bp)) != 0)
5413                 return rc;
5414
5415         bnx2_init_all_rings(bp);
5416         return 0;
5417 }
5418
5419 static int
5420 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5421 {
5422         int rc;
5423
5424         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5425                 return rc;
5426
5427         spin_lock_bh(&bp->phy_lock);
5428         bnx2_init_phy(bp, reset_phy);
5429         bnx2_set_link(bp);
5430         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5431                 bnx2_remote_phy_event(bp);
5432         spin_unlock_bh(&bp->phy_lock);
5433         return 0;
5434 }
5435
5436 static int
5437 bnx2_shutdown_chip(struct bnx2 *bp)
5438 {
5439         u32 reset_code;
5440
5441         if (bp->flags & BNX2_FLAG_NO_WOL)
5442                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5443         else if (bp->wol)
5444                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5445         else
5446                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5447
5448         return bnx2_reset_chip(bp, reset_code);
5449 }
5450
5451 static int
5452 bnx2_test_registers(struct bnx2 *bp)
5453 {
5454         int ret;
5455         int i, is_5709;
5456         static const struct {
5457                 u16   offset;
5458                 u16   flags;
5459 #define BNX2_FL_NOT_5709        1
5460                 u32   rw_mask;
5461                 u32   ro_mask;
5462         } reg_tbl[] = {
5463                 { 0x006c, 0, 0x00000000, 0x0000003f },
5464                 { 0x0090, 0, 0xffffffff, 0x00000000 },
5465                 { 0x0094, 0, 0x00000000, 0x00000000 },
5466
5467                 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5468                 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5469                 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5470                 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5471                 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5472                 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5473                 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5474                 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5475                 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5476
5477                 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5478                 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5479                 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5480                 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5481                 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5482                 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5483
5484                 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5485                 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5486                 { 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },
5487
5488                 { 0x1000, 0, 0x00000000, 0x00000001 },
5489                 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5490
5491                 { 0x1408, 0, 0x01c00800, 0x00000000 },
5492                 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5493                 { 0x14a8, 0, 0x00000000, 0x000001ff },
5494                 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5495                 { 0x14b0, 0, 0x00000002, 0x00000001 },
5496                 { 0x14b8, 0, 0x00000000, 0x00000000 },
5497                 { 0x14c0, 0, 0x00000000, 0x00000009 },
5498                 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5499                 { 0x14cc, 0, 0x00000000, 0x00000001 },
5500                 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5501
5502                 { 0x1800, 0, 0x00000000, 0x00000001 },
5503                 { 0x1804, 0, 0x00000000, 0x00000003 },
5504
5505                 { 0x2800, 0, 0x00000000, 0x00000001 },
5506                 { 0x2804, 0, 0x00000000, 0x00003f01 },
5507                 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5508                 { 0x2810, 0, 0xffff0000, 0x00000000 },
5509                 { 0x2814, 0, 0xffff0000, 0x00000000 },
5510                 { 0x2818, 0, 0xffff0000, 0x00000000 },
5511                 { 0x281c, 0, 0xffff0000, 0x00000000 },
5512                 { 0x2834, 0, 0xffffffff, 0x00000000 },
5513                 { 0x2840, 0, 0x00000000, 0xffffffff },
5514                 { 0x2844, 0, 0x00000000, 0xffffffff },
5515                 { 0x2848, 0, 0xffffffff, 0x00000000 },
5516                 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5517
5518                 { 0x2c00, 0, 0x00000000, 0x00000011 },
5519                 { 0x2c04, 0, 0x00000000, 0x00030007 },
5520
5521                 { 0x3c00, 0, 0x00000000, 0x00000001 },
5522                 { 0x3c04, 0, 0x00000000, 0x00070000 },
5523                 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5524                 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5525                 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5526                 { 0x3c14, 0, 0x00000000, 0xffffffff },
5527                 { 0x3c18, 0, 0x00000000, 0xffffffff },
5528                 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5529                 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5530
5531                 { 0x5004, 0, 0x00000000, 0x0000007f },
5532                 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5533
5534                 { 0x5c00, 0, 0x00000000, 0x00000001 },
5535                 { 0x5c04, 0, 0x00000000, 0x0003000f },
5536                 { 0x5c08, 0, 0x00000003, 0x00000000 },
5537                 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5538                 { 0x5c10, 0, 0x00000000, 0xffffffff },
5539                 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5540                 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5541                 { 0x5c88, 0, 0x00000000, 0x00077373 },
5542                 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5543
5544                 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5545                 { 0x680c, 0, 0xffffffff, 0x00000000 },
5546                 { 0x6810, 0, 0xffffffff, 0x00000000 },
5547                 { 0x6814, 0, 0xffffffff, 0x00000000 },
5548                 { 0x6818, 0, 0xffffffff, 0x00000000 },
5549                 { 0x681c, 0, 0xffffffff, 0x00000000 },
5550                 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5551                 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5552                 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5553                 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5554                 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5555                 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5556                 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5557                 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5558                 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5559                 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5560                 { 0x684c, 0, 0xffffffff, 0x00000000 },
5561                 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5562                 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5563                 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5564                 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5565                 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5566                 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5567
5568                 { 0xffff, 0, 0x00000000, 0x00000000 },
5569         };
5570
5571         ret = 0;
5572         is_5709 = 0;
5573         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5574                 is_5709 = 1;
5575
5576         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5577                 u32 offset, rw_mask, ro_mask, save_val, val;
5578                 u16 flags = reg_tbl[i].flags;
5579
5580                 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5581                         continue;
5582
5583                 offset = (u32) reg_tbl[i].offset;
5584                 rw_mask = reg_tbl[i].rw_mask;
5585                 ro_mask = reg_tbl[i].ro_mask;
5586
5587                 save_val = readl(bp->regview + offset);
5588
5589                 writel(0, bp->regview + offset);
5590
5591                 val = readl(bp->regview + offset);
5592                 if ((val & rw_mask) != 0) {
5593                         goto reg_test_err;
5594                 }
5595
5596                 if ((val & ro_mask) != (save_val & ro_mask)) {
5597                         goto reg_test_err;
5598                 }
5599
5600                 writel(0xffffffff, bp->regview + offset);
5601
5602                 val = readl(bp->regview + offset);
5603                 if ((val & rw_mask) != rw_mask) {
5604                         goto reg_test_err;
5605                 }
5606
5607                 if ((val & ro_mask) != (save_val & ro_mask)) {
5608                         goto reg_test_err;
5609                 }
5610
5611                 writel(save_val, bp->regview + offset);
5612                 continue;
5613
5614 reg_test_err:
5615                 writel(save_val, bp->regview + offset);
5616                 ret = -ENODEV;
5617                 break;
5618         }
5619         return ret;
5620 }
5621
5622 static int
5623 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5624 {
5625         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5626                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5627         int i;
5628
5629         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5630                 u32 offset;
5631
5632                 for (offset = 0; offset < size; offset += 4) {
5633
5634                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5635
5636                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5637                                 test_pattern[i]) {
5638                                 return -ENODEV;
5639                         }
5640                 }
5641         }
5642         return 0;
5643 }
5644
5645 static int
5646 bnx2_test_memory(struct bnx2 *bp)
5647 {
5648         int ret = 0;
5649         int i;
5650         static struct mem_entry {
5651                 u32   offset;
5652                 u32   len;
5653         } mem_tbl_5706[] = {
5654                 { 0x60000,  0x4000 },
5655                 { 0xa0000,  0x3000 },
5656                 { 0xe0000,  0x4000 },
5657                 { 0x120000, 0x4000 },
5658                 { 0x1a0000, 0x4000 },
5659                 { 0x160000, 0x4000 },
5660                 { 0xffffffff, 0    },
5661         },
5662         mem_tbl_5709[] = {
5663                 { 0x60000,  0x4000 },
5664                 { 0xa0000,  0x3000 },
5665                 { 0xe0000,  0x4000 },
5666                 { 0x120000, 0x4000 },
5667                 { 0x1a0000, 0x4000 },
5668                 { 0xffffffff, 0    },
5669         };
5670         struct mem_entry *mem_tbl;
5671
5672         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5673                 mem_tbl = mem_tbl_5709;
5674         else
5675                 mem_tbl = mem_tbl_5706;
5676
5677         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5678                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5679                         mem_tbl[i].len)) != 0) {
5680                         return ret;
5681                 }
5682         }
5683
5684         return ret;
5685 }
5686
/* Loopback modes accepted by bnx2_run_loopback(). */
#define BNX2_MAC_LOOPBACK       0
#define BNX2_PHY_LOOPBACK       1

/* Transmit one self-addressed test packet with the chip in MAC or PHY
 * loopback and verify it comes back intact: no error bits in the
 * l2_fhdr, expected length, and a byte-exact payload match.
 * Returns 0 on pass (or when PHY loopback is skipped for a remote
 * PHY), -EINVAL for an unknown mode, -ENOMEM/-EIO on setup failure,
 * and -ENODEV when the looped packet is missing or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
        unsigned int pkt_size, num_pkts, i;
        struct sk_buff *skb, *rx_skb;
        unsigned char *packet;
        u16 rx_start_idx, rx_idx;
        dma_addr_t map;
        struct tx_bd *txbd;
        struct sw_bd *rx_buf;
        struct l2_fhdr *rx_hdr;
        int ret = -ENODEV;
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

        tx_napi = bnapi;

        txr = &tx_napi->tx_ring;
        rxr = &bnapi->rx_ring;
        if (loopback_mode == BNX2_MAC_LOOPBACK) {
                bp->loopback = MAC_LOOPBACK;
                bnx2_set_mac_loopback(bp);
        }
        else if (loopback_mode == BNX2_PHY_LOOPBACK) {
                if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                        return 0;

                bp->loopback = PHY_LOOPBACK;
                bnx2_set_phy_loopback(bp);
        }
        else
                return -EINVAL;

        /* Build the test frame: destination = our own MAC address,
         * zeroed source/type, then an incrementing byte pattern.
         */
        pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
        skb = netdev_alloc_skb(bp->dev, pkt_size);
        if (!skb)
                return -ENOMEM;
        packet = skb_put(skb, pkt_size);
        memcpy(packet, bp->dev->dev_addr, 6);
        memset(packet + 6, 0x0, 8);
        for (i = 14; i < pkt_size; i++)
                packet[i] = (unsigned char) (i & 0xff);

        map = pci_map_single(bp->pdev, skb->data, pkt_size,
                PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(bp->pdev, map)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        /* Force a status block update so we read a fresh RX consumer
         * index before sending.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);
        rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

        num_pkts = 0;

        /* Post a single TX BD describing the whole packet. */
        txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

        txbd->tx_bd_haddr_hi = (u64) map >> 32;
        txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
        txbd->tx_bd_mss_nbytes = pkt_size;
        txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

        num_pkts++;
        txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
        txr->tx_prod_bseq += pkt_size;

        /* Ring the TX doorbell. */
        REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        udelay(100);

        /* Trigger another status block update to pick up the TX
         * completion and the looped-back RX packet.
         */
        REG_WR(bp, BNX2_HC_COMMAND,
               bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

        REG_RD(bp, BNX2_HC_COMMAND);

        udelay(5);

        pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
        dev_kfree_skb(skb);

        /* The packet must have been consumed by the TX engine... */
        if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
                goto loopback_test_done;

        /* ...and exactly num_pkts packets received back. */
        rx_idx = bnx2_get_hw_rx_cons(bnapi);
        if (rx_idx != rx_start_idx + num_pkts) {
                goto loopback_test_done;
        }

        rx_buf = &rxr->rx_buf_ring[rx_start_idx];
        rx_skb = rx_buf->skb;

        rx_hdr = rx_buf->desc;
        skb_reserve(rx_skb, BNX2_RX_OFFSET);

        pci_dma_sync_single_for_cpu(bp->pdev,
                dma_unmap_addr(rx_buf, mapping),
                bp->rx_buf_size, PCI_DMA_FROMDEVICE);

        /* Fail if the hardware flagged any receive error. */
        if (rx_hdr->l2_fhdr_status &
                (L2_FHDR_ERRORS_BAD_CRC |
                L2_FHDR_ERRORS_PHY_DECODE |
                L2_FHDR_ERRORS_ALIGNMENT |
                L2_FHDR_ERRORS_TOO_SHORT |
                L2_FHDR_ERRORS_GIANT_FRAME)) {

                goto loopback_test_done;
        }

        /* Length check: the received length includes the 4-byte CRC. */
        if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
                goto loopback_test_done;
        }

        /* Verify the payload pattern byte-for-byte. */
        for (i = 14; i < pkt_size; i++) {
                if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
                        goto loopback_test_done;
                }
        }

        ret = 0;

loopback_test_done:
        bp->loopback = 0;
        return ret;
}
5821
/* Bit flags returned by bnx2_test_loopback(); nonzero means failure. */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5826
5827 static int
5828 bnx2_test_loopback(struct bnx2 *bp)
5829 {
5830         int rc = 0;
5831
5832         if (!netif_running(bp->dev))
5833                 return BNX2_LOOPBACK_FAILED;
5834
5835         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5836         spin_lock_bh(&bp->phy_lock);
5837         bnx2_init_phy(bp, 1);
5838         spin_unlock_bh(&bp->phy_lock);
5839         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5840                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5841         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5842                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5843         return rc;
5844 }
5845
/* Size of the NVRAM region validated by bnx2_test_nvram(), and the
 * standard CRC32 residual left after running the CRC over data plus
 * its appended checksum.
 */
#define NVRAM_SIZE 0x200
#define CRC32_RESIDUAL 0xdebb20e3
5848
5849 static int
5850 bnx2_test_nvram(struct bnx2 *bp)
5851 {
5852         __be32 buf[NVRAM_SIZE / 4];
5853         u8 *data = (u8 *) buf;
5854         int rc = 0;
5855         u32 magic, csum;
5856
5857         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5858                 goto test_nvram_done;
5859
5860         magic = be32_to_cpu(buf[0]);
5861         if (magic != 0x669955aa) {
5862                 rc = -ENODEV;
5863                 goto test_nvram_done;
5864         }
5865
5866         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5867                 goto test_nvram_done;
5868
5869         csum = ether_crc_le(0x100, data);
5870         if (csum != CRC32_RESIDUAL) {
5871                 rc = -ENODEV;
5872                 goto test_nvram_done;
5873         }
5874
5875         csum = ether_crc_le(0x100, data + 0x100);
5876         if (csum != CRC32_RESIDUAL) {
5877                 rc = -ENODEV;
5878         }
5879
5880 test_nvram_done:
5881         return rc;
5882 }
5883
5884 static int
5885 bnx2_test_link(struct bnx2 *bp)
5886 {
5887         u32 bmsr;
5888
5889         if (!netif_running(bp->dev))
5890                 return -ENODEV;
5891
5892         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5893                 if (bp->link_up)
5894                         return 0;
5895                 return -ENODEV;
5896         }
5897         spin_lock_bh(&bp->phy_lock);
5898         bnx2_enable_bmsr1(bp);
5899         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5900         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5901         bnx2_disable_bmsr1(bp);
5902         spin_unlock_bh(&bp->phy_lock);
5903
5904         if (bmsr & BMSR_LSTATUS) {
5905                 return 0;
5906         }
5907         return -ENODEV;
5908 }
5909
5910 static int
5911 bnx2_test_intr(struct bnx2 *bp)
5912 {
5913         int i;
5914         u16 status_idx;
5915
5916         if (!netif_running(bp->dev))
5917                 return -ENODEV;
5918
5919         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5920
5921         /* This register is not touched during run-time. */
5922         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5923         REG_RD(bp, BNX2_HC_COMMAND);
5924
5925         for (i = 0; i < 10; i++) {
5926                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5927                         status_idx) {
5928
5929                         break;
5930                 }
5931
5932                 msleep_interruptible(10);
5933         }
5934         if (i < 10)
5935                 return 0;
5936
5937         return -ENODEV;
5938 }
5939
/* Determining link for parallel detection.
 * Returns 1 when the 5706 SerDes sees a forced-mode (non-autoneg) link
 * partner, 0 otherwise.  Called under bp->phy_lock.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        /* Board does not support parallel detection. */
        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        /* No signal detected means no partner at all. */
        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Two consecutive reads; the second gives the current value
         * (first read presumably flushes latched status — verify with
         * PHY documentation).
         */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        /* Receiver not synced or sending invalid RUDI: no usable link. */
        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5971
/* Periodic SerDes link maintenance for the 5706: implements parallel
 * detection (fall back to forced 1G/FD when the partner does not
 * autoneg) and recovers from lost receiver sync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* A recent autoneg restart is still settling; skip the
                 * link check for this tick.
                 */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg has not brought the link up; if a
                         * forced-mode partner is detected, switch to
                         * forced 1000/full (parallel detection).
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Link came up via parallel detection; if the partner
                 * now appears to autoneg (bit 0x20 of shadow reg 0x15),
                 * re-enable autonegotiation.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Double read of the AN debug shadow register; second
                 * read holds the current (non-latched) state.
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Link reported up but receiver lost sync:
                         * force link down once, then let bnx2_set_link()
                         * re-evaluate on the next tick.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
6033
/* Periodic SerDes link maintenance for the 5708: when autoneg fails to
 * bring the link up on a 2.5G-capable part, alternate between forced
 * 2.5G and autoneg until a link is established.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* Remote-managed PHY: firmware handles link, nothing to do. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Give a pending autoneg restart time to settle. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg failed; try forced 2.5G for a short
                         * timeout.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G failed too; go back to autoneg
                         * and wait two ticks before re-checking.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
6066
/* Periodic maintenance timer: firmware heartbeat, missed-MSI check,
 * firmware drop-counter refresh, broken-stats workaround and SerDes
 * link polling.  Re-arms itself at bp->current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts disabled (e.g. reset in progress): just re-arm. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Missed-MSI workaround applies to plain MSI only, not
         * one-shot MSI.
         */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        /* Mirror the firmware rx-drop counter into the stats block. */
        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6102
6103 static int
6104 bnx2_request_irq(struct bnx2 *bp)
6105 {
6106         unsigned long flags;
6107         struct bnx2_irq *irq;
6108         int rc = 0, i;
6109
6110         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6111                 flags = 0;
6112         else
6113                 flags = IRQF_SHARED;
6114
6115         for (i = 0; i < bp->irq_nvecs; i++) {
6116                 irq = &bp->irq_tbl[i];
6117                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6118                                  &bp->bnx2_napi[i]);
6119                 if (rc)
6120                         break;
6121                 irq->requested = 1;
6122         }
6123         return rc;
6124 }
6125
6126 static void
6127 bnx2_free_irq(struct bnx2 *bp)
6128 {
6129         struct bnx2_irq *irq;
6130         int i;
6131
6132         for (i = 0; i < bp->irq_nvecs; i++) {
6133                 irq = &bp->irq_tbl[i];
6134                 if (irq->requested)
6135                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6136                 irq->requested = 0;
6137         }
6138         if (bp->flags & BNX2_FLAG_USING_MSI)
6139                 pci_disable_msi(bp->pdev);
6140         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6141                 pci_disable_msix(bp->pdev);
6142
6143         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6144 }
6145
/* Try to switch the device into MSI-X mode with msix_vecs vectors.
 * On any failure the function returns silently and the caller stays
 * in MSI/INTx mode (BNX2_FLAG_USING_MSIX remains clear).
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);

        /* Program the chip's MSI-X table/PBA window locations. */
        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        /*  Need to flush the previous three writes to ensure MSI-X
         *  is setup properly */
        REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;
        }

        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        /* Record every hardware vector even though only irq_nvecs of
         * them will be requested.
         */
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                bp->irq_tbl[i].vector = msix_ent[i].vector;
                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
                bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }
}
6180
/* Pick the interrupt mode: default to legacy INTx, then try MSI-X,
 * then plain MSI.  Also derives the number of tx/rx rings from the
 * final vector count.  dis_msi forces INTx (used after a failed MSI
 * self-test).
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* INTx defaults; overwritten below if MSI/MSI-X succeeds. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi)
                bnx2_enable_msix(bp, msix_vecs);

        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 supports one-shot MSI. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* Tx ring count must be a power of two; rx rings match vectors. */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
6214
/* Called with rtnl_lock.
 * ndo_open: power up the chip, choose the interrupt mode, allocate
 * rings, request IRQs and initialize the NIC.  If MSI is in use, a
 * self-test is run and the driver falls back to INTx when it fails.
 * Returns 0 on success or a negative errno, with everything torn
 * down on the error path.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_init_napi(bp);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        /* Fresh open starts with a clean accumulated-stats snapshot. */
        memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* dis_msi=1 forces INTx; then re-init and
                         * re-request the (single) interrupt.
                         */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                netdev_info(dev, "using MSI\n");
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                netdev_info(dev, "using MSIX\n");

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        /* Unwind everything allocated above; the free routines are
         * safe to call even for resources never acquired.
         */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        return rc;
}
6291
6292 static void
6293 bnx2_reset_task(struct work_struct *work)
6294 {
6295         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6296
6297         rtnl_lock();
6298         if (!netif_running(bp->dev)) {
6299                 rtnl_unlock();
6300                 return;
6301         }
6302
6303         bnx2_netif_stop(bp, true);
6304
6305         bnx2_init_nic(bp, 1);
6306
6307         atomic_set(&bp->intr_sem, 1);
6308         bnx2_netif_start(bp, true);
6309         rtnl_unlock();
6310 }
6311
/* Dump a snapshot of PCI config, MAC, MCP and HC state to the log for
 * post-mortem debugging (called from the tx-timeout path).
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
        struct net_device *dev = bp->dev;
        u32 mcp_p0, mcp_p1, val1, val2;

        pci_read_config_dword(bp->pdev, PCI_COMMAND, &val1);
        netdev_err(dev, "DEBUG: intr_sem[%x] PCI_CMD[%08x]\n",
                   atomic_read(&bp->intr_sem), val1);
        pci_read_config_dword(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &val1);
        pci_read_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, &val2);
        netdev_err(dev, "DEBUG: PCI_PM[%08x] PCI_MISC_CFG[%08x]\n", val1, val2);
        netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
                   REG_RD(bp, BNX2_EMAC_TX_STATUS),
                   REG_RD(bp, BNX2_EMAC_RX_STATUS));
        netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
                   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
        /* The MCP state registers live at different addresses on the
         * 5709 than on earlier chips.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mcp_p0 = BNX2_MCP_STATE_P0;
                mcp_p1 = BNX2_MCP_STATE_P1;
        } else {
                mcp_p0 = BNX2_MCP_STATE_P0_5708;
                mcp_p1 = BNX2_MCP_STATE_P1_5708;
        }
        netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
                   bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
        netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
                   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
        if (bp->flags & BNX2_FLAG_USING_MSIX)
                netdev_err(dev, "DEBUG: PBA[%08x]\n",
                           REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6344
6345 static void
6346 bnx2_tx_timeout(struct net_device *dev)
6347 {
6348         struct bnx2 *bp = netdev_priv(dev);
6349
6350         bnx2_dump_state(bp);
6351
6352         /* This allows the netif to be shutdown gracefully before resetting */
6353         schedule_work(&bp->reset_task);
6354 }
6355
6356 #ifdef BCM_VLAN
/* Called with rtnl_lock.
 * Install the VLAN group: quiesce traffic, record the group, then
 * reprogram the rx mode and notify the firmware before restarting.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (netif_running(dev))
                bnx2_netif_stop(bp, false);

        bp->vlgrp = vlgrp;

        /* Device closed: nothing further to program or restart. */
        if (!netif_running(dev))
                return;

        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp, false);
}
6377 #endif
6378
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * ndo_start_xmit: map the skb (head plus page frags) for DMA, build a
 * chain of tx buffer descriptors with checksum/VLAN/LSO flags, and
 * ring the tx doorbell.  On a DMA mapping failure the already-mapped
 * pieces are unwound and the skb is dropped (NETDEV_TX_OK).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* Should not happen: the queue is stopped before it can fill. */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        /* LSO: encode TCP/IP header-length info into the BD flags so
         * the chip can segment; IPv6 needs the transport offset split
         * across several flag fields.
         */
        if ((mss = skb_shinfo(skb)->gso_size)) {
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        dma_unmap_addr_set(tx_buf, mapping, mapping);

        /* First BD carries the linear part and the START flag. */
        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One BD per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(bp->pdev, mapping))
                        goto dma_error;
                dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD of the chain. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and byte sequence. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        /* Stop the queue when nearly full; re-wake immediately if a
         * concurrent bnx2_tx_int() already freed enough descriptors.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
dma_error:
        /* save value of frag that failed */
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
        pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
                pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                               skb_shinfo(skb)->frags[i].size,
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
6545
/* Called with rtnl_lock.
 * ndo_stop: cancel deferred work, quiesce interrupts/NAPI/timer, shut
 * the chip down, release IRQs and memory, then drop to D3hot.  The
 * teardown order mirrors the setup order in bnx2_open().
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6567
6568 static void
6569 bnx2_save_stats(struct bnx2 *bp)
6570 {
6571         u32 *hw_stats = (u32 *) bp->stats_blk;
6572         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6573         int i;
6574
6575         /* The 1st 10 counters are 64-bit counters */
6576         for (i = 0; i < 20; i += 2) {
6577                 u32 hi;
6578                 u64 lo;
6579
6580                 hi = temp_stats[i] + hw_stats[i];
6581                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6582                 if (lo > 0xffffffff)
6583                         hi++;
6584                 temp_stats[i] = hi;
6585                 temp_stats[i + 1] = lo & 0xffffffff;
6586         }
6587
6588         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6589                 temp_stats[i] += hw_stats[i];
6590 }
6591
/* Helpers to fold a hardware counter plus its accumulated snapshot
 * (temp_stats_blk) into an unsigned long for net_device_stats.
 * 64-bit counters are stored as _hi/_lo u32 pairs; on 32-bit hosts
 * only the low half fits in unsigned long.
 */
#define GET_64BIT_NET_STATS64(ctr)                              \
        (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
        (unsigned long) (ctr##_lo)

#define GET_64BIT_NET_STATS32(ctr)                              \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

/* 32-bit counter: live value plus snapshot. */
#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
6612
/* ndo_get_stats: translate the chip's statistics block (plus the saved
 * snapshot) into struct net_device_stats.  Returns dev->stats even
 * when the stats block is not yet allocated.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct net_device_stats *net_stats = &dev->stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCOutOctets);

        net_stats->multicast =
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

        net_stats->rx_length_errors =
                GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
                GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

        net_stats->rx_crc_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
                GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

        /* Carrier-sense counter is unreliable on 5706 and 5708 A0. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
        }

        net_stats->tx_errors =
                GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);

        return net_stats;
}
6686
6687 /* All ethtool functions called with rtnl_lock */
6688
/* ethtool get_settings: report the supported/advertised link modes and
 * the current speed/duplex.  Called with rtnl_lock held (see comment
 * above); phy_lock additionally protects the fields updated by the
 * driver's link handling.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* Remote-PHY capable devices can use either medium; otherwise
         * the supported set follows the current phy_port.
         */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* Snapshot the live link state consistently. */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* No link: report unknown (-1) speed and duplex. */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6747
/* ethtool set_settings: validate and apply autoneg/speed/duplex/port
 * settings.  The whole operation runs under phy_lock since the stored
 * request fields are consumed by the PHY setup code.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Selecting a different port type is only possible on
         * remote-PHY capable devices.
         */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                /* Restrict the advertised modes to the selected medium;
                 * if the mask comes up empty, advertise everything that
                 * medium supports.
                 */
                advertising = cmd->advertising;
                if (cmd->port == PORT_TP) {
                        advertising &= ETHTOOL_ALL_COPPER_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                } else {
                        advertising &= ETHTOOL_ALL_FIBRE_SPEED;
                        if (!advertising)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced mode: fibre only allows 1G/2.5G full duplex
                 * (2.5G only on capable PHYs); copper disallows forced
                 * 1G/2.5G.
                 */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6825
6826 static void
6827 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6828 {
6829         struct bnx2 *bp = netdev_priv(dev);
6830
6831         strcpy(info->driver, DRV_MODULE_NAME);
6832         strcpy(info->version, DRV_MODULE_VERSION);
6833         strcpy(info->bus_info, pci_name(bp->pdev));
6834         strcpy(info->fw_version, bp->fw_version);
6835 }
6836
/* Size of the ethtool register dump buffer; see bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN                (32 * 1024)

static int
bnx2_get_regs_len(struct net_device *dev)
{
        /* The dump buffer is a fixed size regardless of chip type. */
        return BNX2_REGDUMP_LEN;
}
6844
/* ethtool register dump.  reg_boundaries[] lists alternating start/end
 * offsets of the readable register ranges, terminated by 0x8000
 * (BNX2_REGDUMP_LEN); offsets in the gaps between ranges are left
 * zero-filled in the output buffer.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
        u32 *p = _p, i, offset;
        u8 *orig_p = _p;
        struct bnx2 *bp = netdev_priv(dev);
        u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
                                 0x0800, 0x0880, 0x0c00, 0x0c10,
                                 0x0c30, 0x0d08, 0x1000, 0x101c,
                                 0x1040, 0x1048, 0x1080, 0x10a4,
                                 0x1400, 0x1490, 0x1498, 0x14f0,
                                 0x1500, 0x155c, 0x1580, 0x15dc,
                                 0x1600, 0x1658, 0x1680, 0x16d8,
                                 0x1800, 0x1820, 0x1840, 0x1854,
                                 0x1880, 0x1894, 0x1900, 0x1984,
                                 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
                                 0x1c80, 0x1c94, 0x1d00, 0x1d84,
                                 0x2000, 0x2030, 0x23c0, 0x2400,
                                 0x2800, 0x2820, 0x2830, 0x2850,
                                 0x2b40, 0x2c10, 0x2fc0, 0x3058,
                                 0x3c00, 0x3c94, 0x4000, 0x4010,
                                 0x4080, 0x4090, 0x43c0, 0x4458,
                                 0x4c00, 0x4c18, 0x4c40, 0x4c54,
                                 0x4fc0, 0x5010, 0x53c0, 0x5444,
                                 0x5c00, 0x5c18, 0x5c80, 0x5c90,
                                 0x5fc0, 0x6000, 0x6400, 0x6428,
                                 0x6800, 0x6848, 0x684c, 0x6860,
                                 0x6888, 0x6910, 0x8000 };

        regs->version = 0;

        /* Zero-fill first so the skipped ranges read back as zero. */
        memset(p, 0, BNX2_REGDUMP_LEN);

        /* Registers can only be read while the chip is up. */
        if (!netif_running(bp->dev))
                return;

        i = 0;
        offset = reg_boundaries[0];
        /* NOTE(review): p is a u32 *, so this advances in words, not
         * bytes; it is a no-op only because reg_boundaries[0] is 0.
         */
        p += offset;
        while (offset < BNX2_REGDUMP_LEN) {
                *p++ = REG_RD(bp, offset);
                offset += 4;
                if (offset == reg_boundaries[i + 1]) {
                        /* End of this readable range: jump to the next. */
                        offset = reg_boundaries[i + 2];
                        p = (u32 *) (orig_p + offset);
                        i += 2;
                }
        }
}
6894
6895 static void
6896 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6897 {
6898         struct bnx2 *bp = netdev_priv(dev);
6899
6900         if (bp->flags & BNX2_FLAG_NO_WOL) {
6901                 wol->supported = 0;
6902                 wol->wolopts = 0;
6903         }
6904         else {
6905                 wol->supported = WAKE_MAGIC;
6906                 if (bp->wol)
6907                         wol->wolopts = WAKE_MAGIC;
6908                 else
6909                         wol->wolopts = 0;
6910         }
6911         memset(&wol->sopass, 0, sizeof(wol->sopass));
6912 }
6913
6914 static int
6915 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6916 {
6917         struct bnx2 *bp = netdev_priv(dev);
6918
6919         if (wol->wolopts & ~WAKE_MAGIC)
6920                 return -EINVAL;
6921
6922         if (wol->wolopts & WAKE_MAGIC) {
6923                 if (bp->flags & BNX2_FLAG_NO_WOL)
6924                         return -EINVAL;
6925
6926                 bp->wol = 1;
6927         }
6928         else {
6929                 bp->wol = 0;
6930         }
6931         return 0;
6932 }
6933
/* ethtool nway_reset: restart autonegotiation.  Only valid while the
 * interface is running and autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 bmcr;

        if (!netif_running(dev))
                return -EAGAIN;

        if (!(bp->autoneg & AUTONEG_SPEED)) {
                return -EINVAL;
        }

        spin_lock_bh(&bp->phy_lock);

        /* Remote-PHY devices: the firmware restarts the negotiation. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                int rc;

                rc = bnx2_setup_remote_phy(bp, bp->phy_port);
                spin_unlock_bh(&bp->phy_lock);
                return rc;
        }

        /* Force a link down visible on the other side */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
                /* Drop the lock across the sleep; the partner needs time
                 * to notice the link going down.
                 */
                spin_unlock_bh(&bp->phy_lock);

                msleep(20);

                spin_lock_bh(&bp->phy_lock);

                /* Arm the driver timer to supervise SerDes autoneg. */
                bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
                bp->serdes_an_pending = 1;
                mod_timer(&bp->timer, jiffies + bp->current_interval);
        }

        /* Clear loopback and kick off a fresh autonegotiation cycle. */
        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        bmcr &= ~BMCR_LOOPBACK;
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

        spin_unlock_bh(&bp->phy_lock);

        return 0;
}
6979
6980 static u32
6981 bnx2_get_link(struct net_device *dev)
6982 {
6983         struct bnx2 *bp = netdev_priv(dev);
6984
6985         return bp->link_up;
6986 }
6987
6988 static int
6989 bnx2_get_eeprom_len(struct net_device *dev)
6990 {
6991         struct bnx2 *bp = netdev_priv(dev);
6992
6993         if (bp->flash_info == NULL)
6994                 return 0;
6995
6996         return (int) bp->flash_size;
6997 }
6998
6999 static int
7000 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7001                 u8 *eebuf)
7002 {
7003         struct bnx2 *bp = netdev_priv(dev);
7004         int rc;
7005
7006         if (!netif_running(dev))
7007                 return -EAGAIN;
7008
7009         /* parameters already validated in ethtool_get_eeprom */
7010
7011         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7012
7013         return rc;
7014 }
7015
7016 static int
7017 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7018                 u8 *eebuf)
7019 {
7020         struct bnx2 *bp = netdev_priv(dev);
7021         int rc;
7022
7023         if (!netif_running(dev))
7024                 return -EAGAIN;
7025
7026         /* parameters already validated in ethtool_set_eeprom */
7027
7028         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7029
7030         return rc;
7031 }
7032
/* ethtool get_coalesce: report the cached interrupt-coalescing
 * parameters stored in the driver private area.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Zero everything first; only the fields below are supported. */
        memset(coal, 0, sizeof(struct ethtool_coalesce));

        coal->rx_coalesce_usecs = bp->rx_ticks;
        coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
        coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
        coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

        coal->tx_coalesce_usecs = bp->tx_ticks;
        coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
        coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
        coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

        coal->stats_block_coalesce_usecs = bp->stats_ticks;

        return 0;
}
7054
/* ethtool set_coalesce: clamp and store the coalescing parameters, then
 * restart the NIC so the host-coalescing hardware picks them up.  Tick
 * values are clamped to 0x3ff and frame counts to 0xff -- presumably the
 * widths of the corresponding HC register fields.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
        struct bnx2 *bp = netdev_priv(dev);

        bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
        if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

        bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
        if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

        bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
        if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

        bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
        if (bp->rx_quick_cons_trip_int > 0xff)
                bp->rx_quick_cons_trip_int = 0xff;

        bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
        if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

        bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
        if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

        bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
        if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

        bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
        if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
                0xff;

        bp->stats_ticks = coal->stats_block_coalesce_usecs;
        /* Chips with broken statistics only support 0 or 1 s intervals. */
        if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
                if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
                        bp->stats_ticks = USEC_PER_SEC;
        }
        if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
                bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
        bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

        /* Re-initialize the NIC to program the new values into the chip. */
        if (netif_running(bp->dev)) {
                bnx2_netif_stop(bp, true);
                bnx2_init_nic(bp, 0);
                bnx2_netif_start(bp, true);
        }

        return 0;
}
7103
7104 static void
7105 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7106 {
7107         struct bnx2 *bp = netdev_priv(dev);
7108
7109         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7110         ering->rx_mini_max_pending = 0;
7111         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7112
7113         ering->rx_pending = bp->rx_ring_size;
7114         ering->rx_mini_pending = 0;
7115         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7116
7117         ering->tx_max_pending = MAX_TX_DESC_CNT;
7118         ering->tx_pending = bp->tx_ring_size;
7119 }
7120
/* Resize the RX/TX rings.  If the interface is up, the chip is fully
 * shut down, ring memory is re-allocated for the new sizes, and the NIC
 * is re-initialized; on allocation/init failure the device is closed and
 * the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
        if (netif_running(bp->dev)) {
                /* Reset will erase chipset stats; save them */
                bnx2_save_stats(bp);

                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
                bnx2_free_skbs(bp);
                bnx2_free_mem(bp);
        }

        bnx2_set_rx_ring_size(bp, rx);
        bp->tx_ring_size = tx;

        if (netif_running(bp->dev)) {
                int rc;

                rc = bnx2_alloc_mem(bp);
                if (!rc)
                        rc = bnx2_init_nic(bp, 0);

                if (rc) {
                        /* Re-enable NAPI first -- presumably so
                         * dev_close() finds it in the expected state.
                         */
                        bnx2_napi_enable(bp);
                        dev_close(bp->dev);
                        return rc;
                }
#ifdef BCM_CNIC
                mutex_lock(&bp->cnic_lock);
                /* Let cnic know about the new status block. */
                if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
                        bnx2_setup_cnic_irq_info(bp);
                mutex_unlock(&bp->cnic_lock);
#endif
                bnx2_netif_start(bp, true);
        }
        return 0;
}
7160
7161 static int
7162 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7163 {
7164         struct bnx2 *bp = netdev_priv(dev);
7165         int rc;
7166
7167         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7168                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7169                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7170
7171                 return -EINVAL;
7172         }
7173         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7174         return rc;
7175 }
7176
7177 static void
7178 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7179 {
7180         struct bnx2 *bp = netdev_priv(dev);
7181
7182         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7183         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7184         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7185 }
7186
7187 static int
7188 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7189 {
7190         struct bnx2 *bp = netdev_priv(dev);
7191
7192         bp->req_flow_ctrl = 0;
7193         if (epause->rx_pause)
7194                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7195         if (epause->tx_pause)
7196                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7197
7198         if (epause->autoneg) {
7199                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7200         }
7201         else {
7202                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7203         }
7204
7205         if (netif_running(dev)) {
7206                 spin_lock_bh(&bp->phy_lock);
7207                 bnx2_setup_phy(bp, bp->phy_port);
7208                 spin_unlock_bh(&bp->phy_lock);
7209         }
7210
7211         return 0;
7212 }
7213
7214 static u32
7215 bnx2_get_rx_csum(struct net_device *dev)
7216 {
7217         struct bnx2 *bp = netdev_priv(dev);
7218
7219         return bp->rx_csum;
7220 }
7221
7222 static int
7223 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7224 {
7225         struct bnx2 *bp = netdev_priv(dev);
7226
7227         bp->rx_csum = data;
7228         return 0;
7229 }
7230
7231 static int
7232 bnx2_set_tso(struct net_device *dev, u32 data)
7233 {
7234         struct bnx2 *bp = netdev_priv(dev);
7235
7236         if (data) {
7237                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7238                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7239                         dev->features |= NETIF_F_TSO6;
7240         } else
7241                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7242                                    NETIF_F_TSO_ECN);
7243         return 0;
7244 }
7245
/* ethtool statistics names.  Entries must stay in sync, index for
 * index, with bnx2_stats_offset_arr and the bnx2_57xx_stats_len_arr
 * tables below.
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
        { "rx_bytes" },
        { "rx_error_bytes" },
        { "tx_bytes" },
        { "tx_error_bytes" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_mac_errors" },
        { "tx_carrier_errors" },
        { "rx_crc_errors" },
        { "rx_align_errors" },
        { "tx_single_collisions" },
        { "tx_multi_collisions" },
        { "tx_deferred" },
        { "tx_excess_collisions" },
        { "tx_late_collisions" },
        { "tx_total_collisions" },
        { "rx_fragments" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_oversize_packets" },
        { "rx_64_byte_packets" },
        { "rx_65_to_127_byte_packets" },
        { "rx_128_to_255_byte_packets" },
        { "rx_256_to_511_byte_packets" },
        { "rx_512_to_1023_byte_packets" },
        { "rx_1024_to_1522_byte_packets" },
        { "rx_1523_to_9022_byte_packets" },
        { "tx_64_byte_packets" },
        { "tx_65_to_127_byte_packets" },
        { "tx_128_to_255_byte_packets" },
        { "tx_256_to_511_byte_packets" },
        { "tx_512_to_1023_byte_packets" },
        { "tx_1024_to_1522_byte_packets" },
        { "tx_1523_to_9022_byte_packets" },
        { "rx_xon_frames" },
        { "rx_xoff_frames" },
        { "tx_xon_frames" },
        { "tx_xoff_frames" },
        { "rx_mac_ctrl_frames" },
        { "rx_filtered_packets" },
        { "rx_ftq_discards" },
        { "rx_discards" },
        { "rx_fw_discards" },
};

/* Number of entries in the statistics tables. */
#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
                        sizeof(bnx2_stats_str_arr[0]))
7300
/* Convert a byte offset within struct statistics_block into an index
 * into the block viewed as an array of u32 words.
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each counter; _hi entries are the upper halves of
 * 64-bit counters (combined in bnx2_get_ethtool_stats()).  Order must
 * match bnx2_stats_str_arr above.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7352
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter widths, indexed like bnx2_stats_str_arr:
 * 8 = 64-bit counter, 4 = 32-bit counter, 0 = skip (unreliable on this
 * chip, reported as 0).  See bnx2_get_ethtool_stats().
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,0,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};

/* Same table for 5708-class chips; only stat_IfHCInBadOctets is skipped. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
        8,0,8,8,8,8,8,8,8,8,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,4,4,4,
        4,4,4,4,4,4,4,
};
7371
#define BNX2_NUM_TESTS 6

/* Self-test names; order must match the buf[] indices written by
 * bnx2_self_test().
 */
static struct {
        char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
        { "register_test (offline)" },
        { "memory_test (offline)" },
        { "loopback_test (offline)" },
        { "nvram_test (online)" },
        { "interrupt_test (online)" },
        { "link_test (online)" },
};
7384
7385 static int
7386 bnx2_get_sset_count(struct net_device *dev, int sset)
7387 {
7388         switch (sset) {
7389         case ETH_SS_TEST:
7390                 return BNX2_NUM_TESTS;
7391         case ETH_SS_STATS:
7392                 return BNX2_NUM_STATS;
7393         default:
7394                 return -EOPNOTSUPP;
7395         }
7396 }
7397
/* ethtool self-test.  Offline tests (register/memory/loopback) require a
 * chip reset and briefly take the NIC out of service; online tests
 * (nvram/interrupt/link) do not.  buf[] indices match
 * bnx2_tests_str_arr; a nonzero entry means that test failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* Make sure the chip is powered up even if the interface is down. */
        bnx2_set_power_state(bp, PCI_D0);

        memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
        if (etest->flags & ETH_TEST_FL_OFFLINE) {
                int i;

                /* Quiesce the NIC and put the chip in diagnostic mode. */
                bnx2_netif_stop(bp, true);
                bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
                bnx2_free_skbs(bp);

                if (bnx2_test_registers(bp) != 0) {
                        buf[0] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if (bnx2_test_memory(bp) != 0) {
                        buf[1] = 1;
                        etest->flags |= ETH_TEST_FL_FAILED;
                }
                if ((buf[2] = bnx2_test_loopback(bp)) != 0)
                        etest->flags |= ETH_TEST_FL_FAILED;

                /* Restore normal operation, or shut the chip down again
                 * if the interface was not up to begin with.
                 */
                if (!netif_running(bp->dev))
                        bnx2_shutdown_chip(bp);
                else {
                        bnx2_init_nic(bp, 1);
                        bnx2_netif_start(bp, true);
                }

                /* wait for link up */
                for (i = 0; i < 7; i++) {
                        if (bp->link_up)
                                break;
                        msleep_interruptible(1000);
                }
        }

        if (bnx2_test_nvram(bp) != 0) {
                buf[3] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }
        if (bnx2_test_intr(bp) != 0) {
                buf[4] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;
        }

        if (bnx2_test_link(bp) != 0) {
                buf[5] = 1;
                etest->flags |= ETH_TEST_FL_FAILED;

        }
        /* Drop back to low power if the interface is down. */
        if (!netif_running(bp->dev))
                bnx2_set_power_state(bp, PCI_D3hot);
}
7456
7457 static void
7458 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7459 {
7460         switch (stringset) {
7461         case ETH_SS_STATS:
7462                 memcpy(buf, bnx2_stats_str_arr,
7463                         sizeof(bnx2_stats_str_arr));
7464                 break;
7465         case ETH_SS_TEST:
7466                 memcpy(buf, bnx2_tests_str_arr,
7467                         sizeof(bnx2_tests_str_arr));
7468                 break;
7469         }
7470 }
7471
/* ethtool get_ethtool_stats: fill buf[] with BNX2_NUM_STATS counters by
 * adding the live hardware stats block and the temp_stats_blk snapshot
 * saved across resets.  The per-chip length table gives each counter's
 * width (8 or 4 bytes) or marks it unavailable (0).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
                struct ethtool_stats *stats, u64 *buf)
{
        struct bnx2 *bp = netdev_priv(dev);
        int i;
        u32 *hw_stats = (u32 *) bp->stats_blk;
        u32 *temp_stats = (u32 *) bp->temp_stats_blk;
        u8 *stats_len_arr = NULL;

        /* No stats block: report all-zero counters. */
        if (hw_stats == NULL) {
                memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
                return;
        }

        /* 5706 and 5708 A0 parts have extra unusable counters -- see the
         * errata note above the length tables.
         */
        if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
            (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                stats_len_arr = bnx2_5706_stats_len_arr;
        else
                stats_len_arr = bnx2_5708_stats_len_arr;

        for (i = 0; i < BNX2_NUM_STATS; i++) {
                unsigned long offset;

                if (stats_len_arr[i] == 0) {
                        /* skip this counter */
                        buf[i] = 0;
                        continue;
                }

                offset = bnx2_stats_offset_arr[i];
                if (stats_len_arr[i] == 4) {
                        /* 4-byte counter */
                        buf[i] = (u64) *(hw_stats + offset) +
                                 *(temp_stats + offset);
                        continue;
                }
                /* 8-byte counter: high word at 'offset', low word next. */
                buf[i] = (((u64) *(hw_stats + offset)) << 32) +
                         *(hw_stats + offset + 1) +
                         (((u64) *(temp_stats + offset)) << 32) +
                         *(temp_stats + offset + 1);
        }
}
7518
7519 static int
7520 bnx2_phys_id(struct net_device *dev, u32 data)
7521 {
7522         struct bnx2 *bp = netdev_priv(dev);
7523         int i;
7524         u32 save;
7525
7526         bnx2_set_power_state(bp, PCI_D0);
7527
7528         if (data == 0)
7529                 data = 2;
7530
7531         save = REG_RD(bp, BNX2_MISC_CFG);
7532         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7533
7534         for (i = 0; i < (data * 2); i++) {
7535                 if ((i % 2) == 0) {
7536                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7537                 }
7538                 else {
7539                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7540                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7541                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7542                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7543                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7544                                 BNX2_EMAC_LED_TRAFFIC);
7545                 }
7546                 msleep_interruptible(500);
7547                 if (signal_pending(current))
7548                         break;
7549         }
7550         REG_WR(bp, BNX2_EMAC_LED, 0);
7551         REG_WR(bp, BNX2_MISC_CFG, save);
7552
7553         if (!netif_running(dev))
7554                 bnx2_set_power_state(bp, PCI_D3hot);
7555
7556         return 0;
7557 }
7558
7559 static int
7560 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7561 {
7562         struct bnx2 *bp = netdev_priv(dev);
7563
7564         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7565                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7566         else
7567                 return (ethtool_op_set_tx_csum(dev, data));
7568 }
7569
7570 static int
7571 bnx2_set_flags(struct net_device *dev, u32 data)
7572 {
7573         return ethtool_op_set_flags(dev, data, ETH_FLAG_RXHASH);
7574 }
7575
/* ethtool entry points for this driver; handlers are defined above.
 * Installed on the net_device in bnx2_init_one().
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= bnx2_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
	.set_flags		= bnx2_set_flags,
	.get_flags		= ethtool_op_get_flags,
};
7608
/* Called with rtnl_lock */
/* ndo_do_ioctl: handle the MII ioctls (SIOCGMIIPHY / SIOCGMIIREG /
 * SIOCSMIIREG).  Returns -EOPNOTSUPP for unknown commands or when the
 * PHY is remotely managed, -EAGAIN when the device is down, otherwise
 * the result of the PHY read/write.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		/* Remote-PHY configurations do not permit direct MII
		 * register access. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		/* phy_lock serializes all MII register access. */
		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7659
7660 /* Called with rtnl_lock */
7661 static int
7662 bnx2_change_mac_addr(struct net_device *dev, void *p)
7663 {
7664         struct sockaddr *addr = p;
7665         struct bnx2 *bp = netdev_priv(dev);
7666
7667         if (!is_valid_ether_addr(addr->sa_data))
7668                 return -EINVAL;
7669
7670         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7671         if (netif_running(dev))
7672                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7673
7674         return 0;
7675 }
7676
7677 /* Called with rtnl_lock */
7678 static int
7679 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7680 {
7681         struct bnx2 *bp = netdev_priv(dev);
7682
7683         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7684                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7685                 return -EINVAL;
7686
7687         dev->mtu = new_mtu;
7688         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7689 }
7690
#ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: invoke each vector's interrupt handler directly so
 * network I/O can proceed without normal interrupt delivery (e.g. for
 * netconsole).  Each vector is masked while its handler runs.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		struct bnx2_irq *irq = &bp->irq_tbl[i];

		disable_irq(irq->vector);
		irq->handler(irq->vector, &bp->bnx2_napi[i]);
		enable_irq(irq->vector);
	}
}
#endif
7707
7708 static void __devinit
7709 bnx2_get_5709_media(struct bnx2 *bp)
7710 {
7711         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7712         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7713         u32 strap;
7714
7715         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7716                 return;
7717         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7718                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7719                 return;
7720         }
7721
7722         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7723                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7724         else
7725                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7726
7727         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7728                 switch (strap) {
7729                 case 0x4:
7730                 case 0x5:
7731                 case 0x6:
7732                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7733                         return;
7734                 }
7735         } else {
7736                 switch (strap) {
7737                 case 0x1:
7738                 case 0x2:
7739                 case 0x4:
7740                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7741                         return;
7742                 }
7743         }
7744 }
7745
/* Probe the conventional-PCI / PCI-X bus mode, clock speed and width
 * from the chip's status registers and record them in bp->flags and
 * bp->bus_speed_mhz (used later by bnx2_bus_string() for logging).
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		/* Map the detected PCI-X clock to a nominal bus speed. */
		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Conventional PCI: 33 vs 66 MHz via the M66EN status bit. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7797
/* Read the VPD (Vital Product Data) area from NVRAM and, when it carries
 * an OEM version string, copy that string to the front of bp->fw_version.
 * Best-effort: any parse failure simply leaves fw_version untouched.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
	int rc, i, j;
	u8 *data;
	unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET	0x300
#define BNX2_VPD_LEN		128
#define BNX2_MAX_VER_SLEN	30

	/* First half of the buffer receives the byte-swapped image;
	 * second half holds the raw NVRAM read. */
	data = kmalloc(256, GFP_KERNEL);
	if (!data)
		return;

	rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
			     BNX2_VPD_LEN);
	if (rc)
		goto vpd_done;

	/* Reverse the bytes of each 32-bit word so data[0..BNX2_VPD_LEN)
	 * is in the byte order the pci_vpd_* helpers expect. */
	for (i = 0; i < BNX2_VPD_LEN; i += 4) {
		data[i] = data[i + BNX2_VPD_LEN + 3];
		data[i + 1] = data[i + BNX2_VPD_LEN + 2];
		data[i + 2] = data[i + BNX2_VPD_LEN + 1];
		data[i + 3] = data[i + BNX2_VPD_LEN];
	}

	i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
	if (i < 0)
		goto vpd_done;

	rosize = pci_vpd_lrdt_size(&data[i]);
	i += PCI_VPD_LRDT_TAG_SIZE;
	block_end = i + rosize;

	/* Bail if the read-only block claims to extend past the bytes we
	 * actually read. */
	if (block_end > BNX2_VPD_LEN)
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_MFR_ID);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	/* Only accept the VPD version when the manufacturer ID field is
	 * exactly "1028" (presumably Dell's PCI vendor ID -- confirm). */
	if (j + len > block_end || len != 4 ||
	    memcmp(&data[j], "1028", 4))
		goto vpd_done;

	j = pci_vpd_find_info_keyword(data, i, rosize,
				      PCI_VPD_RO_KEYWORD_VENDOR0);
	if (j < 0)
		goto vpd_done;

	len = pci_vpd_info_field_size(&data[j]);

	j += PCI_VPD_INFO_FLD_HDR_SIZE;
	if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
		goto vpd_done;

	/* Copy the version string; the trailing space separates it from
	 * the bootcode version that bnx2_init_board() appends after it. */
	memcpy(bp->fw_version, &data[j], len);
	bp->fw_version[len] = ' ';

vpd_done:
	kfree(data);
}
7865
7866 static int __devinit
7867 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7868 {
7869         struct bnx2 *bp;
7870         unsigned long mem_len;
7871         int rc, i, j;
7872         u32 reg;
7873         u64 dma_mask, persist_dma_mask;
7874
7875         SET_NETDEV_DEV(dev, &pdev->dev);
7876         bp = netdev_priv(dev);
7877
7878         bp->flags = 0;
7879         bp->phy_flags = 0;
7880
7881         bp->temp_stats_blk =
7882                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7883
7884         if (bp->temp_stats_blk == NULL) {
7885                 rc = -ENOMEM;
7886                 goto err_out;
7887         }
7888
7889         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7890         rc = pci_enable_device(pdev);
7891         if (rc) {
7892                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7893                 goto err_out;
7894         }
7895
7896         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7897                 dev_err(&pdev->dev,
7898                         "Cannot find PCI device base address, aborting\n");
7899                 rc = -ENODEV;
7900                 goto err_out_disable;
7901         }
7902
7903         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7904         if (rc) {
7905                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7906                 goto err_out_disable;
7907         }
7908
7909         pci_set_master(pdev);
7910         pci_save_state(pdev);
7911
7912         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7913         if (bp->pm_cap == 0) {
7914                 dev_err(&pdev->dev,
7915                         "Cannot find power management capability, aborting\n");
7916                 rc = -EIO;
7917                 goto err_out_release;
7918         }
7919
7920         bp->dev = dev;
7921         bp->pdev = pdev;
7922
7923         spin_lock_init(&bp->phy_lock);
7924         spin_lock_init(&bp->indirect_lock);
7925 #ifdef BCM_CNIC
7926         mutex_init(&bp->cnic_lock);
7927 #endif
7928         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7929
7930         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7931         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7932         dev->mem_end = dev->mem_start + mem_len;
7933         dev->irq = pdev->irq;
7934
7935         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7936
7937         if (!bp->regview) {
7938                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7939                 rc = -ENOMEM;
7940                 goto err_out_release;
7941         }
7942
7943         /* Configure byte swap and enable write to the reg_window registers.
7944          * Rely on CPU to do target byte swapping on big endian systems
7945          * The chip's target access swapping will not swap all accesses
7946          */
7947         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7948                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7949                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7950
7951         bnx2_set_power_state(bp, PCI_D0);
7952
7953         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7954
7955         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7956                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7957                         dev_err(&pdev->dev,
7958                                 "Cannot find PCIE capability, aborting\n");
7959                         rc = -EIO;
7960                         goto err_out_unmap;
7961                 }
7962                 bp->flags |= BNX2_FLAG_PCIE;
7963                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7964                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7965         } else {
7966                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7967                 if (bp->pcix_cap == 0) {
7968                         dev_err(&pdev->dev,
7969                                 "Cannot find PCIX capability, aborting\n");
7970                         rc = -EIO;
7971                         goto err_out_unmap;
7972                 }
7973                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7974         }
7975
7976         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7977                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7978                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7979         }
7980
7981         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7982                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7983                         bp->flags |= BNX2_FLAG_MSI_CAP;
7984         }
7985
7986         /* 5708 cannot support DMA addresses > 40-bit.  */
7987         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7988                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7989         else
7990                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7991
7992         /* Configure DMA attributes. */
7993         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7994                 dev->features |= NETIF_F_HIGHDMA;
7995                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7996                 if (rc) {
7997                         dev_err(&pdev->dev,
7998                                 "pci_set_consistent_dma_mask failed, aborting\n");
7999                         goto err_out_unmap;
8000                 }
8001         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
8002                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
8003                 goto err_out_unmap;
8004         }
8005
8006         if (!(bp->flags & BNX2_FLAG_PCIE))
8007                 bnx2_get_pci_speed(bp);
8008
8009         /* 5706A0 may falsely detect SERR and PERR. */
8010         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8011                 reg = REG_RD(bp, PCI_COMMAND);
8012                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
8013                 REG_WR(bp, PCI_COMMAND, reg);
8014         }
8015         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8016                 !(bp->flags & BNX2_FLAG_PCIX)) {
8017
8018                 dev_err(&pdev->dev,
8019                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8020                 goto err_out_unmap;
8021         }
8022
8023         bnx2_init_nvram(bp);
8024
8025         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8026
8027         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8028             BNX2_SHM_HDR_SIGNATURE_SIG) {
8029                 u32 off = PCI_FUNC(pdev->devfn) << 2;
8030
8031                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8032         } else
8033                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8034
8035         /* Get the permanent MAC address.  First we need to make sure the
8036          * firmware is actually running.
8037          */
8038         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8039
8040         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8041             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8042                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8043                 rc = -ENODEV;
8044                 goto err_out_unmap;
8045         }
8046
8047         bnx2_read_vpd_fw_ver(bp);
8048
8049         j = strlen(bp->fw_version);
8050         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8051         for (i = 0; i < 3 && j < 24; i++) {
8052                 u8 num, k, skip0;
8053
8054                 if (i == 0) {
8055                         bp->fw_version[j++] = 'b';
8056                         bp->fw_version[j++] = 'c';
8057                         bp->fw_version[j++] = ' ';
8058                 }
8059                 num = (u8) (reg >> (24 - (i * 8)));
8060                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8061                         if (num >= k || !skip0 || k == 1) {
8062                                 bp->fw_version[j++] = (num / k) + '0';
8063                                 skip0 = 0;
8064                         }
8065                 }
8066                 if (i != 2)
8067                         bp->fw_version[j++] = '.';
8068         }
8069         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8070         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8071                 bp->wol = 1;
8072
8073         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8074                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8075
8076                 for (i = 0; i < 30; i++) {
8077                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8078                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8079                                 break;
8080                         msleep(10);
8081                 }
8082         }
8083         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8084         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8085         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8086             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8087                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8088
8089                 if (j < 32)
8090                         bp->fw_version[j++] = ' ';
8091                 for (i = 0; i < 3 && j < 28; i++) {
8092                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8093                         reg = swab32(reg);
8094                         memcpy(&bp->fw_version[j], &reg, 4);
8095                         j += 4;
8096                 }
8097         }
8098
8099         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8100         bp->mac_addr[0] = (u8) (reg >> 8);
8101         bp->mac_addr[1] = (u8) reg;
8102
8103         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8104         bp->mac_addr[2] = (u8) (reg >> 24);
8105         bp->mac_addr[3] = (u8) (reg >> 16);
8106         bp->mac_addr[4] = (u8) (reg >> 8);
8107         bp->mac_addr[5] = (u8) reg;
8108
8109         bp->tx_ring_size = MAX_TX_DESC_CNT;
8110         bnx2_set_rx_ring_size(bp, 255);
8111
8112         bp->rx_csum = 1;
8113
8114         bp->tx_quick_cons_trip_int = 2;
8115         bp->tx_quick_cons_trip = 20;
8116         bp->tx_ticks_int = 18;
8117         bp->tx_ticks = 80;
8118
8119         bp->rx_quick_cons_trip_int = 2;
8120         bp->rx_quick_cons_trip = 12;
8121         bp->rx_ticks_int = 18;
8122         bp->rx_ticks = 18;
8123
8124         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8125
8126         bp->current_interval = BNX2_TIMER_INTERVAL;
8127
8128         bp->phy_addr = 1;
8129
8130         /* Disable WOL support if we are running on a SERDES chip. */
8131         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8132                 bnx2_get_5709_media(bp);
8133         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8134                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8135
8136         bp->phy_port = PORT_TP;
8137         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8138                 bp->phy_port = PORT_FIBRE;
8139                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8140                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8141                         bp->flags |= BNX2_FLAG_NO_WOL;
8142                         bp->wol = 0;
8143                 }
8144                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8145                         /* Don't do parallel detect on this board because of
8146                          * some board problems.  The link will not go down
8147                          * if we do parallel detect.
8148                          */
8149                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8150                             pdev->subsystem_device == 0x310c)
8151                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8152                 } else {
8153                         bp->phy_addr = 2;
8154                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8155                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8156                 }
8157         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8158                    CHIP_NUM(bp) == CHIP_NUM_5708)
8159                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8160         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8161                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8162                   CHIP_REV(bp) == CHIP_REV_Bx))
8163                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8164
8165         bnx2_init_fw_cap(bp);
8166
8167         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8168             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8169             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8170             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8171                 bp->flags |= BNX2_FLAG_NO_WOL;
8172                 bp->wol = 0;
8173         }
8174
8175         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8176                 bp->tx_quick_cons_trip_int =
8177                         bp->tx_quick_cons_trip;
8178                 bp->tx_ticks_int = bp->tx_ticks;
8179                 bp->rx_quick_cons_trip_int =
8180                         bp->rx_quick_cons_trip;
8181                 bp->rx_ticks_int = bp->rx_ticks;
8182                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8183                 bp->com_ticks_int = bp->com_ticks;
8184                 bp->cmd_ticks_int = bp->cmd_ticks;
8185         }
8186
8187         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8188          *
8189          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8190          * with byte enables disabled on the unused 32-bit word.  This is legal
8191          * but causes problems on the AMD 8132 which will eventually stop
8192          * responding after a while.
8193          *
8194          * AMD believes this incompatibility is unique to the 5706, and
8195          * prefers to locally disable MSI rather than globally disabling it.
8196          */
8197         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8198                 struct pci_dev *amd_8132 = NULL;
8199
8200                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8201                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8202                                                   amd_8132))) {
8203
8204                         if (amd_8132->revision >= 0x10 &&
8205                             amd_8132->revision <= 0x13) {
8206                                 disable_msi = 1;
8207                                 pci_dev_put(amd_8132);
8208                                 break;
8209                         }
8210                 }
8211         }
8212
8213         bnx2_set_default_link(bp);
8214         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8215
8216         init_timer(&bp->timer);
8217         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8218         bp->timer.data = (unsigned long) bp;
8219         bp->timer.function = bnx2_timer;
8220
8221         return 0;
8222
8223 err_out_unmap:
8224         if (bp->regview) {
8225                 iounmap(bp->regview);
8226                 bp->regview = NULL;
8227         }
8228
8229 err_out_release:
8230         pci_release_regions(pdev);
8231
8232 err_out_disable:
8233         pci_disable_device(pdev);
8234         pci_set_drvdata(pdev, NULL);
8235
8236 err_out:
8237         return rc;
8238 }
8239
8240 static char * __devinit
8241 bnx2_bus_string(struct bnx2 *bp, char *str)
8242 {
8243         char *s = str;
8244
8245         if (bp->flags & BNX2_FLAG_PCIE) {
8246                 s += sprintf(s, "PCI Express");
8247         } else {
8248                 s += sprintf(s, "PCI");
8249                 if (bp->flags & BNX2_FLAG_PCIX)
8250                         s += sprintf(s, "-X");
8251                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8252                         s += sprintf(s, " 32-bit");
8253                 else
8254                         s += sprintf(s, " 64-bit");
8255                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8256         }
8257         return str;
8258 }
8259
8260 static void
8261 bnx2_del_napi(struct bnx2 *bp)
8262 {
8263         int i;
8264
8265         for (i = 0; i < bp->irq_nvecs; i++)
8266                 netif_napi_del(&bp->bnx2_napi[i].napi);
8267 }
8268
8269 static void
8270 bnx2_init_napi(struct bnx2 *bp)
8271 {
8272         int i;
8273
8274         for (i = 0; i < bp->irq_nvecs; i++) {
8275                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8276                 int (*poll)(struct napi_struct *, int);
8277
8278                 if (i == 0)
8279                         poll = bnx2_poll;
8280                 else
8281                         poll = bnx2_poll_msix;
8282
8283                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8284                 bnapi->bp = bp;
8285         }
8286 }
8287
/* net_device callbacks; installed on the netdev in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8306
/* OR @flags into dev->vlan_features when VLAN support is compiled in;
 * compiles to a no-op otherwise.
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8313
8314 static int __devinit
8315 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8316 {
8317         static int version_printed = 0;
8318         struct net_device *dev = NULL;
8319         struct bnx2 *bp;
8320         int rc;
8321         char str[40];
8322
8323         if (version_printed++ == 0)
8324                 pr_info("%s", version);
8325
8326         /* dev zeroed in init_etherdev */
8327         dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8328
8329         if (!dev)
8330                 return -ENOMEM;
8331
8332         rc = bnx2_init_board(pdev, dev);
8333         if (rc < 0) {
8334                 free_netdev(dev);
8335                 return rc;
8336         }
8337
8338         dev->netdev_ops = &bnx2_netdev_ops;
8339         dev->watchdog_timeo = TX_TIMEOUT;
8340         dev->ethtool_ops = &bnx2_ethtool_ops;
8341
8342         bp = netdev_priv(dev);
8343
8344         pci_set_drvdata(pdev, dev);
8345
8346         rc = bnx2_request_firmware(bp);
8347         if (rc)
8348                 goto error;
8349
8350         memcpy(dev->dev_addr, bp->mac_addr, 6);
8351         memcpy(dev->perm_addr, bp->mac_addr, 6);
8352
8353         dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO |
8354                          NETIF_F_RXHASH;
8355         vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8356         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8357                 dev->features |= NETIF_F_IPV6_CSUM;
8358                 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8359         }
8360 #ifdef BCM_VLAN
8361         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8362 #endif
8363         dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8364         vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8365         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8366                 dev->features |= NETIF_F_TSO6;
8367                 vlan_features_add(dev, NETIF_F_TSO6);
8368         }
8369         if ((rc = register_netdev(dev))) {
8370                 dev_err(&pdev->dev, "Cannot register net device\n");
8371                 goto error;
8372         }
8373
8374         netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8375                     board_info[ent->driver_data].name,
8376                     ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8377                     ((CHIP_ID(bp) & 0x0ff0) >> 4),
8378                     bnx2_bus_string(bp, str),
8379                     dev->base_addr,
8380                     bp->pdev->irq, dev->dev_addr);
8381
8382         return 0;
8383
8384 error:
8385         if (bp->mips_firmware)
8386                 release_firmware(bp->mips_firmware);
8387         if (bp->rv2p_firmware)
8388                 release_firmware(bp->rv2p_firmware);
8389
8390         if (bp->regview)
8391                 iounmap(bp->regview);
8392         pci_release_regions(pdev);
8393         pci_disable_device(pdev);
8394         pci_set_drvdata(pdev, NULL);
8395         free_netdev(dev);
8396         return rc;
8397 }
8398
/* PCI remove callback: undo everything bnx2_init_one()/bnx2_init_board()
 * set up, in reverse order of acquisition.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred driver work is still running before the
	 * device is torn down.
	 */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8424
/* PCI suspend callback: quiesce the NIC and put it into the requested
 * low-power state.  Safe whether or not the interface is up.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work, traffic and the periodic timer before
	 * resetting the chip; RX/TX buffers are freed because the chip
	 * is fully re-initialized on resume.
	 */
	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8448
/* PCI resume callback: restore config space and, if the interface was
 * up at suspend time, bring the chip back to D0 and restart traffic.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);	/* full re-init: suspend reset the chip */
	bnx2_netif_start(bp, true);
	return 0;
}
8465
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
                                               pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* rtnl_lock serializes against concurrent open/close/ioctl. */
	rtnl_lock();
	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		/* Link is permanently dead; tell the core to give up. */
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev)) {
		bnx2_netif_stop(bp, true);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8500
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	/* Restore config space saved at probe/suspend, then re-save it so
	 * a later suspend sees a consistent snapshot.
	 */
	pci_restore_state(pdev);
	pci_save_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
8531
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp, true);

	netif_device_attach(dev);
	rtnl_unlock();
}
8551
/* Hooks the PCI error-recovery (AER) core uses to drive this driver
 * through detect -> slot reset -> resume.
 */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8557
/* Driver registration record handed to the PCI core: device ID table,
 * probe/remove entry points, power management and error handlers.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8567
8568 static int __init bnx2_init(void)
8569 {
8570         return pci_register_driver(&bnx2_pci_driver);
8571 }
8572
8573 static void __exit bnx2_cleanup(void)
8574 {
8575         pci_unregister_driver(&bnx2_pci_driver);
8576 }
8577
/* Standard module load/unload hooks. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8580
8581
8582