]> bbs.cooldavid.org Git - net-next-2.6.git/blob - drivers/net/bnx2.c
Merge branch 'master' of /repos/git/net-next-2.6
[net-next-2.6.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2010 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
16
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
31 #include <asm/io.h>
32 #include <asm/irq.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
35 #include <asm/page.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
41 #define BCM_VLAN 1
42 #endif
43 #include <net/ip.h>
44 #include <net/tcp.h>
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define DRV_MODULE_VERSION      "2.0.15"
62 #define DRV_MODULE_RELDATE      "May 4, 2010"
63 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j15.fw"
66 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
68
69 #define RUN_AT(x) (jiffies + (x))
70
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT  (5*HZ)
73
74 static char version[] __devinitdata =
75         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
86
87 static int disable_msi = 0;
88
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
/* Board variants handled by this driver.  The enumerators double as the
 * index into board_info[] below and as the driver_data field in
 * bnx2_pci_tbl, so the three lists must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,			/* HP OEM builds of the 5706 family */
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
105
/* indexed by board_t, above */
/* Human-readable names printed at probe time; one entry per board_t value. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
122
/* PCI IDs this driver binds to.  The HP OEM entries match on subsystem
 * vendor/device so they must come before the catch-all PCI_ANY_ID rows
 * for the same device ID.  The last field is the board_t driver_data.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 5716/5716S device IDs are given numerically; no
	 * PCI_DEVICE_ID_NX2_* macro is used here.
	 */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
148
/* NVRAM part descriptors.  At init time the driver matches the NVRAM
 * strapping read from the chip against these entries to select access
 * parameters (page geometry, address mask, total size) for the fitted
 * part.  The five leading hex words are raw controller setup values —
 * presumably strapping/config/write-setup registers; see struct
 * flash_spec in bnx2.h for the exact field names (TODO confirm).
 * A total_size of 0 marks a reserved "expansion" slot.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Ateml Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
237
/* The 5709 family has a single known NVRAM configuration, so it gets a
 * dedicated descriptor instead of the strapping-based table lookup.
 * Designated initializers: the raw setup words default to zero here.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
246
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
251
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
253 {
254         u32 diff;
255
256         smp_mb();
257
258         /* The ring uses 256 indices for 255 entries, one of them
259          * needs to be skipped.
260          */
261         diff = txr->tx_prod - txr->tx_cons;
262         if (unlikely(diff >= TX_DESC_CNT)) {
263                 diff &= 0xffff;
264                 if (diff == TX_DESC_CNT)
265                         diff = MAX_TX_DESC_CNT;
266         }
267         return (bp->tx_ring_size - diff);
268 }
269
/* Read a chip register through the PCI config-space indirect window.
 * indirect_lock serializes the address/data register pair against
 * other indirect accessors (softirq-safe lock, hence the _bh variant).
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
281
/* Write @val to a chip register through the PCI config-space indirect
 * window; the write must follow the address setup while indirect_lock
 * is held.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
290
/* Write a word into the firmware shared-memory region (an offset from
 * shmem_base, accessed via the indirect register window).
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
296
297 static u32
298 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
299 {
300         return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
301 }
302
/* Write @val into the context memory of connection @cid_addr at
 * @offset.  The 5709 uses a request/ack interface: post the data and
 * a WRITE_REQ, then poll briefly for the controller to clear the
 * request bit.  Older chips expose context memory through a simple
 * address/data register pair.  indirect_lock serializes both forms.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Up to ~25us for the write request to be consumed; a
		 * timeout is silently ignored.
		 */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
326
327 #ifdef BCM_CNIC
328 static int
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
330 {
331         struct bnx2 *bp = netdev_priv(dev);
332         struct drv_ctl_io *io = &info->data.io;
333
334         switch (info->cmd) {
335         case DRV_CTL_IO_WR_CMD:
336                 bnx2_reg_wr_ind(bp, io->offset, io->data);
337                 break;
338         case DRV_CTL_IO_RD_CMD:
339                 io->data = bnx2_reg_rd_ind(bp, io->offset);
340                 break;
341         case DRV_CTL_CTX_WR_CMD:
342                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
343                 break;
344         default:
345                 return -EINVAL;
346         }
347         return 0;
348 }
349
/* Tell the CNIC driver which IRQ vector and status block to use.
 * With MSI-X the CNIC gets the vector after the ethernet vectors
 * (sb_id = irq_nvecs) and cnic_present is cleared; without MSI-X it
 * shares vector 0 and the cnic_tag/cnic_present fields let the shared
 * interrupt path hand events to the CNIC.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	/* Status blocks are laid out contiguously at MSIX alignment;
	 * point the CNIC at the one matching its vector.
	 */
	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
376
/* Register a CNIC driver's ops with this device.  Returns -EINVAL for
 * a NULL ops pointer and -EBUSY if a CNIC driver is already attached.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	/* rcu_assign_pointer orders the cnic_data store above before
	 * the ops pointer becomes visible to RCU readers.
	 */
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
399
/* Detach the CNIC driver: clear the registration state under cnic_lock,
 * then wait for any in-flight RCU readers of cnic_ops to finish before
 * returning, so the caller may free its ops structure.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	mutex_lock(&bp->cnic_lock);
	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	mutex_unlock(&bp->cnic_lock);
	synchronize_rcu();
	return 0;
}
414
415 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
416 {
417         struct bnx2 *bp = netdev_priv(dev);
418         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
419
420         cp->drv_owner = THIS_MODULE;
421         cp->chip_id = bp->chip_id;
422         cp->pdev = bp->pdev;
423         cp->io_base = bp->regview;
424         cp->drv_ctl = bnx2_drv_ctl;
425         cp->drv_register_cnic = bnx2_register_cnic;
426         cp->drv_unregister_cnic = bnx2_unregister_cnic;
427
428         return cp;
429 }
430 EXPORT_SYMBOL(bnx2_cnic_probe);
431
432 static void
433 bnx2_cnic_stop(struct bnx2 *bp)
434 {
435         struct cnic_ops *c_ops;
436         struct cnic_ctl_info info;
437
438         mutex_lock(&bp->cnic_lock);
439         c_ops = bp->cnic_ops;
440         if (c_ops) {
441                 info.cmd = CNIC_CTL_STOP_CMD;
442                 c_ops->cnic_ctl(bp->cnic_data, &info);
443         }
444         mutex_unlock(&bp->cnic_lock);
445 }
446
447 static void
448 bnx2_cnic_start(struct bnx2 *bp)
449 {
450         struct cnic_ops *c_ops;
451         struct cnic_ctl_info info;
452
453         mutex_lock(&bp->cnic_lock);
454         c_ops = bp->cnic_ops;
455         if (c_ops) {
456                 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
457                         struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
458
459                         bnapi->cnic_tag = bnapi->last_status_idx;
460                 }
461                 info.cmd = CNIC_CTL_START_CMD;
462                 c_ops->cnic_ctl(bp->cnic_data, &info);
463         }
464         mutex_unlock(&bp->cnic_lock);
465 }
466
467 #else
468
/* CNIC support compiled out: empty stub keeps callers unconditional. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
473
/* CNIC support compiled out: empty stub keeps callers unconditional. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
478
479 #endif
480
/* Read PHY register @reg over the MDIO interface.
 *
 * If the MAC is auto-polling the PHY, polling is switched off first so
 * the manual transaction does not collide with it, and restored before
 * returning.  On success returns 0 with the 16-bit value in *val; if
 * the transaction never completes, returns -EBUSY and sets *val to 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Launch the read: PHY address, register number, READ command. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for START_BUSY to clear, then re-read to
	 * fetch the data bits.
	 */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Re-enable hardware auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
537
/* Write the 16-bit @val to PHY register @reg over MDIO.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction and restored afterwards.  Returns 0 on success or -EBUSY
 * if the transaction never completed.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);	/* flush posted write */

		udelay(40);
	}

	/* Launch the write: PHY address, register number, data, WRITE. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll up to ~500us for the transaction to complete. */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Re-enable hardware auto-polling if we turned it off above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
586
587 static void
588 bnx2_disable_int(struct bnx2 *bp)
589 {
590         int i;
591         struct bnx2_napi *bnapi;
592
593         for (i = 0; i < bp->irq_nvecs; i++) {
594                 bnapi = &bp->bnx2_napi[i];
595                 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
596                        BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
597         }
598         REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
599 }
600
/* Unmask interrupts on every vector.  Each vector gets two INT_ACK
 * writes: the first acknowledges up to last_status_idx while keeping
 * the interrupt masked, the second repeats the ack with the mask bit
 * cleared.  The final COAL_NOW kick makes the host coalescing block
 * deliver any status update that is already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
621
/* Disable interrupts and wait for in-flight handlers to finish.
 * intr_sem is bumped first so interrupt/NAPI code that checks it backs
 * off even before the hardware mask takes effect; synchronize_irq()
 * then waits out any handler already running on each vector.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
635
636 static void
637 bnx2_napi_disable(struct bnx2 *bp)
638 {
639         int i;
640
641         for (i = 0; i < bp->irq_nvecs; i++)
642                 napi_disable(&bp->bnx2_napi[i].napi);
643 }
644
645 static void
646 bnx2_napi_enable(struct bnx2 *bp)
647 {
648         int i;
649
650         for (i = 0; i < bp->irq_nvecs; i++)
651                 napi_enable(&bp->bnx2_napi[i].napi);
652 }
653
/* Quiesce the data path: optionally stop the CNIC client first, then
 * NAPI and the TX queues, then interrupts (synchronously).  Carrier is
 * forced off so the stack does not declare a TX watchdog timeout while
 * the device is intentionally stopped.
 */
static void
bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
{
	if (stop_cnic)
		bnx2_cnic_stop(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
	}
	bnx2_disable_int_sync(bp);
	netif_carrier_off(bp->dev);	/* prevent tx timeout */
}
666
/* Undo bnx2_netif_stop().  intr_sem counts nested disables, so only
 * the call that drops it back to zero actually restarts the queues,
 * restores carrier (if link is up), and re-enables NAPI, interrupts
 * and optionally the CNIC client.
 */
static void
bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			spin_lock_bh(&bp->phy_lock);
			if (bp->link_up)
				netif_carrier_on(bp->dev);
			spin_unlock_bh(&bp->phy_lock);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			if (start_cnic)
				bnx2_cnic_start(bp);
		}
	}
}
684
685 static void
686 bnx2_free_tx_mem(struct bnx2 *bp)
687 {
688         int i;
689
690         for (i = 0; i < bp->num_tx_rings; i++) {
691                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
692                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
693
694                 if (txr->tx_desc_ring) {
695                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
696                                             txr->tx_desc_ring,
697                                             txr->tx_desc_mapping);
698                         txr->tx_desc_ring = NULL;
699                 }
700                 kfree(txr->tx_buf_ring);
701                 txr->tx_buf_ring = NULL;
702         }
703 }
704
705 static void
706 bnx2_free_rx_mem(struct bnx2 *bp)
707 {
708         int i;
709
710         for (i = 0; i < bp->num_rx_rings; i++) {
711                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
712                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
713                 int j;
714
715                 for (j = 0; j < bp->rx_max_ring; j++) {
716                         if (rxr->rx_desc_ring[j])
717                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
718                                                     rxr->rx_desc_ring[j],
719                                                     rxr->rx_desc_mapping[j]);
720                         rxr->rx_desc_ring[j] = NULL;
721                 }
722                 vfree(rxr->rx_buf_ring);
723                 rxr->rx_buf_ring = NULL;
724
725                 for (j = 0; j < bp->rx_max_pg_ring; j++) {
726                         if (rxr->rx_pg_desc_ring[j])
727                                 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
728                                                     rxr->rx_pg_desc_ring[j],
729                                                     rxr->rx_pg_desc_mapping[j]);
730                         rxr->rx_pg_desc_ring[j] = NULL;
731                 }
732                 vfree(rxr->rx_pg_ring);
733                 rxr->rx_pg_ring = NULL;
734         }
735 }
736
737 static int
738 bnx2_alloc_tx_mem(struct bnx2 *bp)
739 {
740         int i;
741
742         for (i = 0; i < bp->num_tx_rings; i++) {
743                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
744                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
745
746                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
747                 if (txr->tx_buf_ring == NULL)
748                         return -ENOMEM;
749
750                 txr->tx_desc_ring =
751                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
752                                              &txr->tx_desc_mapping);
753                 if (txr->tx_desc_ring == NULL)
754                         return -ENOMEM;
755         }
756         return 0;
757 }
758
/* Allocate per-RX-ring memory: vmalloc'ed software buffer rings and
 * DMA-coherent descriptor pages for the normal ring and (when pages
 * are in use) the page ring.  Returns 0 or -ENOMEM; on failure the
 * caller (bnx2_alloc_mem) cleans up via bnx2_free_mem, which tolerates
 * the partially-filled state left here.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		/* The page ring is only needed for jumbo-sized buffers. */
		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* rx_max_pg_ring is 0 when the page ring is unused, so
		 * this loop is then a no-op.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
807
/* Free everything bnx2_alloc_mem() allocated: TX/RX ring memory, the
 * 5709 context pages, and the combined status+statistics block.
 * stats_blk points into the same allocation as status_blk.msi (see
 * bnx2_alloc_mem), so it is only NULLed here, not freed separately.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
833
/* Allocate all host memory the device needs: the combined status +
 * statistics block, the per-vector MSI-X status blocks carved out of
 * the same allocation, the 5709 context pages, and the RX/TX ring
 * memory.  Returns 0 or -ENOMEM; on any failure everything already
 * allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the start of the block as its status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get aligned slices of the same block. */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics live right after the status blocks. */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	/* 5709 keeps connection context in host memory pages. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
910
911 static void
912 bnx2_report_fw_link(struct bnx2 *bp)
913 {
914         u32 fw_link_status = 0;
915
916         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
917                 return;
918
919         if (bp->link_up) {
920                 u32 bmsr;
921
922                 switch (bp->line_speed) {
923                 case SPEED_10:
924                         if (bp->duplex == DUPLEX_HALF)
925                                 fw_link_status = BNX2_LINK_STATUS_10HALF;
926                         else
927                                 fw_link_status = BNX2_LINK_STATUS_10FULL;
928                         break;
929                 case SPEED_100:
930                         if (bp->duplex == DUPLEX_HALF)
931                                 fw_link_status = BNX2_LINK_STATUS_100HALF;
932                         else
933                                 fw_link_status = BNX2_LINK_STATUS_100FULL;
934                         break;
935                 case SPEED_1000:
936                         if (bp->duplex == DUPLEX_HALF)
937                                 fw_link_status = BNX2_LINK_STATUS_1000HALF;
938                         else
939                                 fw_link_status = BNX2_LINK_STATUS_1000FULL;
940                         break;
941                 case SPEED_2500:
942                         if (bp->duplex == DUPLEX_HALF)
943                                 fw_link_status = BNX2_LINK_STATUS_2500HALF;
944                         else
945                                 fw_link_status = BNX2_LINK_STATUS_2500FULL;
946                         break;
947                 }
948
949                 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
950
951                 if (bp->autoneg) {
952                         fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
953
954                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955                         bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
956
957                         if (!(bmsr & BMSR_ANEGCOMPLETE) ||
958                             bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
959                                 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
960                         else
961                                 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
962                 }
963         }
964         else
965                 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
966
967         bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
968 }
969
970 static char *
971 bnx2_xceiver_str(struct bnx2 *bp)
972 {
973         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
974                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
975                  "Copper"));
976 }
977
/* Log the new link state, update the carrier flag, and forward the
 * state to the firmware.  The pr_cont() calls continue the
 * netdev_info() line, so their order and content are significant.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		/* No trailing newline: flow-control info is appended below. */
		netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
			    bnx2_xceiver_str(bp),
			    bp->line_speed,
			    bp->duplex == DUPLEX_FULL ? "full" : "half");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				pr_cont(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					pr_cont("& transmit ");
			}
			else {
				/* FLOW_CTRL_TX only */
				pr_cont(", transmit ");
			}
			pr_cont("flow control ON");
		}
		pr_cont("\n");
	} else {
		netif_carrier_off(bp->dev);
		netdev_err(bp->dev, "NIC %s Link is Down\n",
			   bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
1008
1009 static void
1010 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1011 {
1012         u32 local_adv, remote_adv;
1013
1014         bp->flow_ctrl = 0;
1015         if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1016                 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1017
1018                 if (bp->duplex == DUPLEX_FULL) {
1019                         bp->flow_ctrl = bp->req_flow_ctrl;
1020                 }
1021                 return;
1022         }
1023
1024         if (bp->duplex != DUPLEX_FULL) {
1025                 return;
1026         }
1027
1028         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1029             (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1030                 u32 val;
1031
1032                 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1033                 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1034                         bp->flow_ctrl |= FLOW_CTRL_TX;
1035                 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1036                         bp->flow_ctrl |= FLOW_CTRL_RX;
1037                 return;
1038         }
1039
1040         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1041         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1042
1043         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1044                 u32 new_local_adv = 0;
1045                 u32 new_remote_adv = 0;
1046
1047                 if (local_adv & ADVERTISE_1000XPAUSE)
1048                         new_local_adv |= ADVERTISE_PAUSE_CAP;
1049                 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1050                         new_local_adv |= ADVERTISE_PAUSE_ASYM;
1051                 if (remote_adv & ADVERTISE_1000XPAUSE)
1052                         new_remote_adv |= ADVERTISE_PAUSE_CAP;
1053                 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1054                         new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1055
1056                 local_adv = new_local_adv;
1057                 remote_adv = new_remote_adv;
1058         }
1059
1060         /* See Table 28B-3 of 802.3ab-1999 spec. */
1061         if (local_adv & ADVERTISE_PAUSE_CAP) {
1062                 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1063                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1064                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1065                         }
1066                         else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1067                                 bp->flow_ctrl = FLOW_CTRL_RX;
1068                         }
1069                 }
1070                 else {
1071                         if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072                                 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1073                         }
1074                 }
1075         }
1076         else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1077                 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1078                         (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1079
1080                         bp->flow_ctrl = FLOW_CTRL_TX;
1081                 }
1082         }
1083 }
1084
1085 static int
1086 bnx2_5709s_linkup(struct bnx2 *bp)
1087 {
1088         u32 val, speed;
1089
1090         bp->link_up = 1;
1091
1092         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1093         bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1094         bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1095
1096         if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1097                 bp->line_speed = bp->req_line_speed;
1098                 bp->duplex = bp->req_duplex;
1099                 return 0;
1100         }
1101         speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1102         switch (speed) {
1103                 case MII_BNX2_GP_TOP_AN_SPEED_10:
1104                         bp->line_speed = SPEED_10;
1105                         break;
1106                 case MII_BNX2_GP_TOP_AN_SPEED_100:
1107                         bp->line_speed = SPEED_100;
1108                         break;
1109                 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1110                 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1111                         bp->line_speed = SPEED_1000;
1112                         break;
1113                 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1114                         bp->line_speed = SPEED_2500;
1115                         break;
1116         }
1117         if (val & MII_BNX2_GP_TOP_AN_FD)
1118                 bp->duplex = DUPLEX_FULL;
1119         else
1120                 bp->duplex = DUPLEX_HALF;
1121         return 0;
1122 }
1123
1124 static int
1125 bnx2_5708s_linkup(struct bnx2 *bp)
1126 {
1127         u32 val;
1128
1129         bp->link_up = 1;
1130         bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1131         switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1132                 case BCM5708S_1000X_STAT1_SPEED_10:
1133                         bp->line_speed = SPEED_10;
1134                         break;
1135                 case BCM5708S_1000X_STAT1_SPEED_100:
1136                         bp->line_speed = SPEED_100;
1137                         break;
1138                 case BCM5708S_1000X_STAT1_SPEED_1G:
1139                         bp->line_speed = SPEED_1000;
1140                         break;
1141                 case BCM5708S_1000X_STAT1_SPEED_2G5:
1142                         bp->line_speed = SPEED_2500;
1143                         break;
1144         }
1145         if (val & BCM5708S_1000X_STAT1_FD)
1146                 bp->duplex = DUPLEX_FULL;
1147         else
1148                 bp->duplex = DUPLEX_HALF;
1149
1150         return 0;
1151 }
1152
1153 static int
1154 bnx2_5706s_linkup(struct bnx2 *bp)
1155 {
1156         u32 bmcr, local_adv, remote_adv, common;
1157
1158         bp->link_up = 1;
1159         bp->line_speed = SPEED_1000;
1160
1161         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1162         if (bmcr & BMCR_FULLDPLX) {
1163                 bp->duplex = DUPLEX_FULL;
1164         }
1165         else {
1166                 bp->duplex = DUPLEX_HALF;
1167         }
1168
1169         if (!(bmcr & BMCR_ANENABLE)) {
1170                 return 0;
1171         }
1172
1173         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1174         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1175
1176         common = local_adv & remote_adv;
1177         if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1178
1179                 if (common & ADVERTISE_1000XFULL) {
1180                         bp->duplex = DUPLEX_FULL;
1181                 }
1182                 else {
1183                         bp->duplex = DUPLEX_HALF;
1184                 }
1185         }
1186
1187         return 0;
1188 }
1189
1190 static int
1191 bnx2_copper_linkup(struct bnx2 *bp)
1192 {
1193         u32 bmcr;
1194
1195         bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1196         if (bmcr & BMCR_ANENABLE) {
1197                 u32 local_adv, remote_adv, common;
1198
1199                 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1200                 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1201
1202                 common = local_adv & (remote_adv >> 2);
1203                 if (common & ADVERTISE_1000FULL) {
1204                         bp->line_speed = SPEED_1000;
1205                         bp->duplex = DUPLEX_FULL;
1206                 }
1207                 else if (common & ADVERTISE_1000HALF) {
1208                         bp->line_speed = SPEED_1000;
1209                         bp->duplex = DUPLEX_HALF;
1210                 }
1211                 else {
1212                         bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1213                         bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1214
1215                         common = local_adv & remote_adv;
1216                         if (common & ADVERTISE_100FULL) {
1217                                 bp->line_speed = SPEED_100;
1218                                 bp->duplex = DUPLEX_FULL;
1219                         }
1220                         else if (common & ADVERTISE_100HALF) {
1221                                 bp->line_speed = SPEED_100;
1222                                 bp->duplex = DUPLEX_HALF;
1223                         }
1224                         else if (common & ADVERTISE_10FULL) {
1225                                 bp->line_speed = SPEED_10;
1226                                 bp->duplex = DUPLEX_FULL;
1227                         }
1228                         else if (common & ADVERTISE_10HALF) {
1229                                 bp->line_speed = SPEED_10;
1230                                 bp->duplex = DUPLEX_HALF;
1231                         }
1232                         else {
1233                                 bp->line_speed = 0;
1234                                 bp->link_up = 0;
1235                         }
1236                 }
1237         }
1238         else {
1239                 if (bmcr & BMCR_SPEED100) {
1240                         bp->line_speed = SPEED_100;
1241                 }
1242                 else {
1243                         bp->line_speed = SPEED_10;
1244                 }
1245                 if (bmcr & BMCR_FULLDPLX) {
1246                         bp->duplex = DUPLEX_FULL;
1247                 }
1248                 else {
1249                         bp->duplex = DUPLEX_HALF;
1250                 }
1251         }
1252
1253         return 0;
1254 }
1255
/* Program the L2 context type for one rx ring (cid).  On the 5709
 * this also computes and programs the rx flow-control watermarks.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	/* NOTE(review): 0x02 << 8 is an undocumented context field value
	 * from Broadcom -- confirm against chip documentation.
	 */
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Watermarks are only meaningful with tx PAUSE enabled. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		/* Disable the low watermark on tiny rings. */
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Scale both marks to the units the context expects. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is clamped to its 4-bit field; if it scales
		 * to zero, disable the low watermark as well.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1291
1292 static void
1293 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1294 {
1295         int i;
1296         u32 cid;
1297
1298         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1299                 if (i == 1)
1300                         cid = RX_RSS_CID;
1301                 bnx2_init_rx_context(bp, cid);
1302         }
1303 }
1304
/* Reprogram the EMAC to match the PHY's resolved link parameters
 * (speed, duplex, flow control) and acknowledge the link-change
 * interrupt.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* NOTE(review): 0x2620/0x26ff are Broadcom-provided TX_LENGTHS
	 * values; the larger value is applied only for 1000 Mbps half
	 * duplex -- confirm meaning against chip documentation.
	 */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	/* Clear all link-dependent mode bits before re-deriving them. */
	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M port mode. */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		/* Link down: keep the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 rx watermarks depend on flow_ctrl; reprogram contexts. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1372
1373 static void
1374 bnx2_enable_bmsr1(struct bnx2 *bp)
1375 {
1376         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1377             (CHIP_NUM(bp) == CHIP_NUM_5709))
1378                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1379                                MII_BNX2_BLK_ADDR_GP_STATUS);
1380 }
1381
1382 static void
1383 bnx2_disable_bmsr1(struct bnx2 *bp)
1384 {
1385         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386             (CHIP_NUM(bp) == CHIP_NUM_5709))
1387                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1389 }
1390
1391 static int
1392 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1393 {
1394         u32 up1;
1395         int ret = 1;
1396
1397         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1398                 return 0;
1399
1400         if (bp->autoneg & AUTONEG_SPEED)
1401                 bp->advertising |= ADVERTISED_2500baseX_Full;
1402
1403         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1404                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1405
1406         bnx2_read_phy(bp, bp->mii_up1, &up1);
1407         if (!(up1 & BCM5708S_UP1_2G5)) {
1408                 up1 |= BCM5708S_UP1_2G5;
1409                 bnx2_write_phy(bp, bp->mii_up1, up1);
1410                 ret = 0;
1411         }
1412
1413         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1414                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1415                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1416
1417         return ret;
1418 }
1419
1420 static int
1421 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1422 {
1423         u32 up1;
1424         int ret = 0;
1425
1426         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1427                 return 0;
1428
1429         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1430                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1431
1432         bnx2_read_phy(bp, bp->mii_up1, &up1);
1433         if (up1 & BCM5708S_UP1_2G5) {
1434                 up1 &= ~BCM5708S_UP1_2G5;
1435                 bnx2_write_phy(bp, bp->mii_up1, up1);
1436                 ret = 1;
1437         }
1438
1439         if (CHIP_NUM(bp) == CHIP_NUM_5709)
1440                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1441                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1442
1443         return ret;
1444 }
1445
/* Force the SerDes PHY to a fixed 2.5 Gbps link.  The 5709 and 5708
 * program forced mode through different registers; other chips are
 * not 2.5G capable and return immediately.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	/* uninitialized_var() silences a false "may be used
	 * uninitialized" warning; every use of bmcr is gated on !err.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* The force-speed controls are in the SERDES_DIG bank. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
			val |= MII_BNX2_SD_MISC1_FORCE |
				MII_BNX2_SD_MISC1_FORCE_2_5G;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default bank before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Forcing a speed turns autoneg off; apply the requested duplex. */
	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1489
/* Undo bnx2_enable_forced_2g5(): clear the forced 2.5 Gbps mode and,
 * if speed autoneg is configured, restart autonegotiation at 1G.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	/* uninitialized_var() silences a false "may be used
	 * uninitialized" warning; every use of bmcr is gated on !err.
	 */
	u32 uninitialized_var(bmcr);
	int err;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		/* The force-speed controls are in the SERDES_DIG bank. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
			val &= ~MII_BNX2_SD_MISC1_FORCE;
			bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
		}

		/* Restore the default bank before touching BMCR. */
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (!err)
			bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (err)
		return;

	/* Go back to autonegotiation and kick off a new cycle. */
	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1528
/* Force the 5706 SerDes link down (start != 0) or release the forced
 * state (start == 0) via the expansion SERDES control register.
 * NOTE(review): the masks are undocumented Broadcom values --
 * presumably "& 0xff0f" clears the link-enable bits and "| 0xc0"
 * restores them; confirm against chip documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	/* Read-modify-write through the DSP address/data port pair. */
	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1541
/* Re-evaluate the link state for a locally-managed PHY: read BMSR,
 * resolve speed/duplex and flow control, report changes, and
 * reprogram the MAC accordingly.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Firmware owns a remote PHY; link state arrives via fw events. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	/* Remember the old state so we only report actual changes. */
	link_up = bp->link_up;

	/* BMSR status bits are latched; read twice so the second read
	 * reflects the current state.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		/* Release a previously-forced link-down condition. */
		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* The shadow AN_DBG register is latched; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		/* On 5706 SerDes, override the BMSR link bit with the
		 * EMAC link indication qualified by the NOSYNC bit.
		 */
		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G mode so autoneg can
		 * run when the link returns.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode and re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1625
1626 static int
1627 bnx2_reset_phy(struct bnx2 *bp)
1628 {
1629         int i;
1630         u32 reg;
1631
1632         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1633
1634 #define PHY_RESET_MAX_WAIT 100
1635         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1636                 udelay(10);
1637
1638                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1639                 if (!(reg & BMCR_RESET)) {
1640                         udelay(20);
1641                         break;
1642                 }
1643         }
1644         if (i == PHY_RESET_MAX_WAIT) {
1645                 return -EBUSY;
1646         }
1647         return 0;
1648 }
1649
1650 static u32
1651 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1652 {
1653         u32 adv = 0;
1654
1655         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1656                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1657
1658                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659                         adv = ADVERTISE_1000XPAUSE;
1660                 }
1661                 else {
1662                         adv = ADVERTISE_PAUSE_CAP;
1663                 }
1664         }
1665         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1666                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667                         adv = ADVERTISE_1000XPSE_ASYM;
1668                 }
1669                 else {
1670                         adv = ADVERTISE_PAUSE_ASYM;
1671                 }
1672         }
1673         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1674                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1675                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1676                 }
1677                 else {
1678                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1679                 }
1680         }
1681         return adv;
1682 }
1683
1684 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1685
/* Build a link-setting request word from the current autoneg, speed,
 * and pause configuration and hand it to the firmware, which manages
 * the remote PHY.  Called with bp->phy_lock held; the lock is dropped
 * around the firmware handshake (see the sparse annotations).
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every speed enabled in
		 * bp->advertising.
		 */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: encode exactly one speed/duplex value. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Fold the pause advertisement (either encoding) into the
	 * firmware's symmetric/asymmetric flags.
	 */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* Drop phy_lock across the handshake -- bnx2_fw_sync presumably
	 * waits for the firmware to process the command; verify it may
	 * not be called with this spinlock held.
	 */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1744
/* Configure a locally-managed SerDes PHY from bp->autoneg,
 * bp->req_line_speed/req_duplex and bp->advertising.  Delegates to
 * bnx2_setup_remote_phy() when the firmware owns the PHY.  May drop
 * and re-take bp->phy_lock around a sleep.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed/duplex path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* If the 2.5G enable state had to change, the link must
		 * be bounced even if BMCR stays the same.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* Chip-specific handling of the forced 2.5G mode. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() requires dropping the PHY spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1861
/* All fibre speeds this NIC can advertise; includes 2.5G only when
 * the PHY is 2.5G-capable.  NOTE: expands "bp" from the caller's
 * scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds, as ethtool ADVERTISED_* flags. */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII ADVERTISE register bits for all 10/100 modes plus CSMA. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII CTRL1000 register bits for 1000 Mbps half/full duplex. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1876
/* Load the default link settings for a firmware-managed (remote) PHY
 * from the port's shared-memory configuration into bp->autoneg,
 * bp->advertising and bp->req_line_speed/req_duplex.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg enabled: build the advertising mask. */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: checks run lowest to highest, so the
		 * highest configured speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1923
1924 static void
1925 bnx2_set_default_link(struct bnx2 *bp)
1926 {
1927         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1928                 bnx2_set_default_remote_link(bp);
1929                 return;
1930         }
1931
1932         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1933         bp->req_line_speed = 0;
1934         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1935                 u32 reg;
1936
1937                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1938
1939                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1940                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1941                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1942                         bp->autoneg = 0;
1943                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1944                         bp->req_duplex = DUPLEX_FULL;
1945                 }
1946         } else
1947                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1948 }
1949
1950 static void
1951 bnx2_send_heart_beat(struct bnx2 *bp)
1952 {
1953         u32 msg;
1954         u32 addr;
1955
1956         spin_lock(&bp->indirect_lock);
1957         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1958         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1959         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1960         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1961         spin_unlock(&bp->indirect_lock);
1962 }
1963
/* Handle a link-status event from the firmware-managed (remote) PHY.
 * Decodes the BNX2_LINK_STATUS shared-memory word into bp->link_up,
 * bp->line_speed, bp->duplex, bp->flow_ctrl and bp->phy_port, reports
 * a link change if one occurred, and reprograms the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* previous state, to detect a change */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware wants proof the driver is still alive. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Half-duplex cases set the duplex and then fall through
		 * to the matching full-duplex case for the speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			/* Flow control forced by configuration. */
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			/* Use the result negotiated by the firmware. */
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The media type may have switched; reload defaults if so. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2040
2041 static int
2042 bnx2_set_remote_link(struct bnx2 *bp)
2043 {
2044         u32 evt_code;
2045
2046         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2047         switch (evt_code) {
2048                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2049                         bnx2_remote_phy_event(bp);
2050                         break;
2051                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2052                 default:
2053                         bnx2_send_heart_beat(bp);
2054                         break;
2055         }
2056         return 0;
2057 }
2058
/* Configure a locally-managed copper PHY from bp->autoneg,
 * bp->advertising and bp->req_line_speed/req_duplex.  May drop and
 * re-take bp->phy_lock around a sleep.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the bits this driver manages from the current
		 * advertisement registers for comparison below.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Rewrite the advertisement and restart autoneg only if
		 * something changed or autoneg was disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down events; read it twice to get
		 * the current link state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* msleep() requires dropping the PHY spinlock. */
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2157
2158 static int
2159 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2160 __releases(&bp->phy_lock)
2161 __acquires(&bp->phy_lock)
2162 {
2163         if (bp->loopback == MAC_LOOPBACK)
2164                 return 0;
2165
2166         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2167                 return (bnx2_setup_serdes_phy(bp, port));
2168         }
2169         else {
2170                 return (bnx2_setup_copper_phy(bp));
2171         }
2172 }
2173
/* Initialize the internal 5709 SerDes PHY.  The IEEE registers sit at
 * an offset of 0x10 on this PHY, so redirect the cached mii_* register
 * offsets first, then program the SerDes blocks through the
 * block-address register.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is capable of it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the default IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2223
/* Initialize the external BCM5708S SerDes PHY: enable fiber mode with
 * auto-detect, optionally enable 2.5G, and apply chip-revision and
 * board-specific TX tuning from the shared-memory configuration.
 * Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 revisions need a TX amplitude workaround. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from the NVRAM port config,
	 * applied only on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2281
/* Initialize the 5706 SerDes PHY.  Adjusts extended packet length
 * handling based on MTU using vendor registers 0x18/0x1c (magic
 * values per Broadcom; exact bit semantics are not documented here).
 * Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	/* NOTE(review): 0x300 written only on 5706 — presumably a
	 * hardware-control workaround; confirm against Broadcom docs.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		u32 val;

		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2319
/* Initialize a copper PHY: apply the CRC and early-DAC workarounds
 * when the corresponding phy_flags are set, configure extended packet
 * length for jumbo MTU, and enable ethernet@wirespeed.  Always
 * returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* CRC workaround: vendor register write sequence through
	 * 0x18/0x17/0x15 (values per Broadcom).
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC by clearing bit 8 of DSP expansion reg 8. */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended length bits. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2371
2372
/* Reset/initialize the PHY and apply the current link settings.
 * Selects the chip-specific init routine by PHY type and chip number,
 * caches the PHY id, and finishes with bnx2_setup_phy().  Remote
 * (firmware-managed) PHYs skip local initialization entirely.
 * Returns 0 or an error code from the init/setup routines.  May drop
 * and re-take bp->phy_lock (via bnx2_setup_phy).
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default IEEE MII register offsets; the 5709 SerDes init
	 * overrides these with its own offsets.
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	/* Cache the 32-bit PHY id from the two id registers. */
	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2418
2419 static int
2420 bnx2_set_mac_loopback(struct bnx2 *bp)
2421 {
2422         u32 mac_mode;
2423
2424         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2425         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2426         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2427         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2428         bp->link_up = 1;
2429         return 0;
2430 }
2431
2432 static int bnx2_test_link(struct bnx2 *);
2433
2434 static int
2435 bnx2_set_phy_loopback(struct bnx2 *bp)
2436 {
2437         u32 mac_mode;
2438         int rc, i;
2439
2440         spin_lock_bh(&bp->phy_lock);
2441         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2442                             BMCR_SPEED1000);
2443         spin_unlock_bh(&bp->phy_lock);
2444         if (rc)
2445                 return rc;
2446
2447         for (i = 0; i < 10; i++) {
2448                 if (bnx2_test_link(bp) == 0)
2449                         break;
2450                 msleep(100);
2451         }
2452
2453         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2454         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2455                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2456                       BNX2_EMAC_MODE_25G_MODE);
2457
2458         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2459         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2460         bp->link_up = 1;
2461         return 0;
2462 }
2463
/* Post a message to the bootcode mailbox and optionally wait for the
 * acknowledgement.
 *
 * @msg_data: message code/data; the incremented driver sequence
 *	number is OR'ed in before posting.
 * @ack: if zero, post the message and return 0 immediately.
 * @silent: if set, suppress the timeout error message.
 *
 * Returns 0 on success (WAIT0 messages return 0 after polling,
 * regardless of the ack result), -EBUSY if the firmware did not ack
 * in time (the timeout is also reported back to the firmware), or
 * -EIO if the firmware reports a failure status.  May sleep while
 * polling for the ack.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* The firmware echoes our sequence number in its ack. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2508
/* Initialize the 5709 context memory: kick the on-chip MEM_INIT, then
 * program the host page table with the DMA address of each context
 * block page, zeroing each page first.
 *
 * Returns 0 on success, -EBUSY if the chip does not complete an
 * operation in time, or -ENOMEM if a context block was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	/* Enable the context block and start memory init; the host page
	 * size is encoded as (BCM_PAGE_BITS - 8) at bit 16.
	 */
	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* MEM_INIT self-clears when done; poll briefly (up to ~20us). */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the 64-bit DMA address of page i into the host
		 * page table, then request the write and wait for the
		 * chip to latch it (WRITE_REQ self-clears).
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2556
/* Zero out all 96 on-chip contexts through the context window
 * registers (non-5709 path; the 5709 uses host memory instead).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0: vcids with bit 3 set map to a relocated
			 * physical cid (0x60-based), others map 1:1.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			/* NOTE(review): the addresses accumulate
			 * (i << PHY_CTX_SHIFT) each iteration rather than
			 * being recomputed from a base — presumably only
			 * correct if this loop runs at most twice; confirm
			 * against CTX_SIZE/PHY_CTX_SIZE in bnx2.h.
			 */
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2599
2600 static int
2601 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2602 {
2603         u16 *good_mbuf;
2604         u32 good_mbuf_cnt;
2605         u32 val;
2606
2607         good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2608         if (good_mbuf == NULL) {
2609                 pr_err("Failed to allocate memory in %s\n", __func__);
2610                 return -ENOMEM;
2611         }
2612
2613         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2614                 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2615
2616         good_mbuf_cnt = 0;
2617
2618         /* Allocate a bunch of mbufs and save the good ones in an array. */
2619         val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2620         while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2621                 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2622                                 BNX2_RBUF_COMMAND_ALLOC_REQ);
2623
2624                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2625
2626                 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2627
2628                 /* The addresses with Bit 9 set are bad memory blocks. */
2629                 if (!(val & (1 << 9))) {
2630                         good_mbuf[good_mbuf_cnt] = (u16) val;
2631                         good_mbuf_cnt++;
2632                 }
2633
2634                 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2635         }
2636
2637         /* Free the good ones back to the mbuf pool thus discarding
2638          * all the bad ones. */
2639         while (good_mbuf_cnt) {
2640                 good_mbuf_cnt--;
2641
2642                 val = good_mbuf[good_mbuf_cnt];
2643                 val = (val << 9) | val | 1;
2644
2645                 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2646         }
2647         kfree(good_mbuf);
2648         return 0;
2649 }
2650
2651 static void
2652 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2653 {
2654         u32 val;
2655
2656         val = (mac_addr[0] << 8) | mac_addr[1];
2657
2658         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2659
2660         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2661                 (mac_addr[4] << 8) | mac_addr[5];
2662
2663         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2664 }
2665
/* Allocate and DMA-map one page for rx page-ring slot @index and point
 * the corresponding rx_bd at the mapping.  Called from the rx path, so
 * the allocation is GFP_ATOMIC.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, or -EIO
 * if the DMA mapping fails (the page is freed in that case).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	/* Remember the mapping for later unmap, and give the 64-bit bus
	 * address to the hardware descriptor.
	 */
	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2690
2691 static void
2692 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2693 {
2694         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2695         struct page *page = rx_pg->page;
2696
2697         if (!page)
2698                 return;
2699
2700         pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2701                        PCI_DMA_FROMDEVICE);
2702
2703         __free_page(page);
2704         rx_pg->page = NULL;
2705 }
2706
/* Allocate and DMA-map a fresh rx skb for ring slot @index, point the
 * rx_bd at it, and advance the ring's posted byte count.
 *
 * Returns 0 on success, -ENOMEM if the skb allocation fails, or -EIO
 * if the DMA mapping fails (the skb is freed in that case).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary if needed. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	/* The chip writes an l2_fhdr at the start of the buffer; keep a
	 * pointer to it for the rx completion path.
	 */
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	/* Running byte count of buffers posted to the chip. */
	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2742
2743 static int
2744 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2745 {
2746         struct status_block *sblk = bnapi->status_blk.msi;
2747         u32 new_link_state, old_link_state;
2748         int is_set = 1;
2749
2750         new_link_state = sblk->status_attn_bits & event;
2751         old_link_state = sblk->status_attn_bits_ack & event;
2752         if (new_link_state != old_link_state) {
2753                 if (new_link_state)
2754                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2755                 else
2756                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2757         } else
2758                 is_set = 0;
2759
2760         return is_set;
2761 }
2762
/* Service PHY attention events from the status block under phy_lock:
 * link-state changes update the link, and timer-abort events trigger
 * remote link processing.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2776
2777 static inline u16
2778 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2779 {
2780         u16 cons;
2781
2782         /* Tell compiler that status block fields can change. */
2783         barrier();
2784         cons = *bnapi->hw_tx_cons_ptr;
2785         barrier();
2786         if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2787                 cons++;
2788         return cons;
2789 }
2790
/* Reclaim completed tx packets for this napi instance's ring, up to
 * @budget packets: unmap and free each completed skb, advance the
 * software consumer index, and re-wake the tx queue if it was stopped
 * and enough descriptors are now available.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One tx queue per bnx2_napi instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't reclaim until the last BD of this packet
			 * (head + nr_frags + 1) has completed.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit difference handles index wrap. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Unmap each fragment page of the packet. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Pick up any completions that arrived while we worked. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the tx lock to avoid racing with a
		 * concurrent bnx2_start_xmit() stopping the queue.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2881
/* Recycle @count rx page-ring entries from the consumer side back to
 * the producer side without allocating new pages.  When @skb is
 * non-NULL, the caller failed to replace the last frag page: that page
 * is detached from the skb, returned to the consumer slot, and the skb
 * is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	/* Move each consumer page (with its DMA mapping and descriptor
	 * address) to the matching producer slot.
	 */
	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* If producer and consumer point at the same slot, the
		 * page is already where it needs to be.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
				dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2937
/* Recycle an rx buffer: move the skb and its DMA mapping from consumer
 * slot @cons to producer slot @prod instead of allocating a fresh
 * buffer.  Used on error/drop paths and when a replacement allocation
 * fails.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the header area (synced to the CPU by the rx path) back
	 * to the device before reposting the buffer.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	/* Same slot: mapping and descriptor address already correct. */
	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2968
/* Finish building a received packet in @skb.
 *
 * @len:      packet length, not including the 4-byte trailing CRC
 * @hdr_len:  0 for a fully linear packet; otherwise the number of
 *	header bytes in the linear buffer, the remainder coming from
 *	page-ring fragments
 * @ring_idx: rx consumer index in bits 31:16, producer index in 15:0
 *
 * A replacement rx skb is allocated for the ring first; if that fails,
 * the original buffer (and any page-ring entries the packet spans) are
 * recycled and an error is returned.  Returns 0 on success.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			/* Recycle the page-ring entries this split packet
			 * would have consumed (raw_len includes the CRC).
			 */
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* frag_size still includes the 4-byte CRC; it is trimmed
		 * from the final fragment below.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only CRC bytes remain in this page: trim
				 * the already-attached data instead of
				 * adding another frag, and recycle the
				 * unused page-ring entries.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Roll back the ring indices and recycle the
				 * remaining pages; the skb (with the frag
				 * just attached) is freed by the helper.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3067
3068 static inline u16
3069 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3070 {
3071         u16 cons;
3072
3073         /* Tell compiler that status block fields can change. */
3074         barrier();
3075         cons = *bnapi->hw_rx_cons_ptr;
3076         barrier();
3077         if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3078                 cons++;
3079         return cons;
3080 }
3081
/* NAPI rx handler: process up to @budget completed rx packets.
 *
 * Small frames (<= rx_copy_thresh) are copied into a fresh skb so the
 * large ring buffer can be recycled in place; larger frames go through
 * bnx2_rx_skb().  Error frames are dropped with their buffers recycled.
 * Also handles VLAN tag extraction, rx checksum offload results, and
 * finally posts the new producer indices and byte count to the chip.
 *
 * Returns the number of packets passed up the stack.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;
	struct pci_dev *pdev = bp->pdev;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* When the DMA ops have no sync_single_for_cpu hook, the
		 * next descriptor's contents are already CPU-visible, so
		 * prefetching it is worthwhile.
		 */
		if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
			next_rx_buf =
				&rxr->rx_buf_ring[
					RX_RING_IDX(NEXT_RX_BD(sw_cons))];
			prefetch(next_rx_buf->desc);
		}
		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Make the l2_fhdr and packet headers CPU-visible. */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		/* Determine whether the packet spills into the page ring;
		 * for SPLIT frames the header length is reported in the
		 * ip_xsum field.
		 */
		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop error frames, recycling buffer and pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the 4-byte trailing CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			/* The original buffer goes back into the ring; the
			 * copy is what gets passed up the stack.
			 */
			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group for acceleration: rebuild
				 * the 802.1Q header in the packet data.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop over-length frames unless VLAN-tagged (0x8100 is
		 * the 802.1Q ethertype).
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Report hardware checksum results for TCP/UDP frames. */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Post the new producer indices and byte count to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	/* Order the MMIO index writes above. */
	mmiowb();

	return rx_pkt;

}
3265
3266 /* MSI ISR - The only difference between this and the INTx ISR
3267  * is that the MSI interrupt is always serviced.
3268  */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI processing is done. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3288
3289 static irqreturn_t
3290 bnx2_msi_1shot(int irq, void *dev_instance)
3291 {
3292         struct bnx2_napi *bnapi = dev_instance;
3293         struct bnx2 *bp = bnapi->bp;
3294
3295         prefetch(bnapi->status_blk.msi);
3296
3297         /* Return here if interrupt is disabled. */
3298         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3299                 return IRQ_HANDLED;
3300
3301         napi_schedule(&bnapi->napi);
3302
3303         return IRQ_HANDLED;
3304 }
3305
/* INTx interrupt handler (shared-line capable).  Returns IRQ_NONE when
 * the interrupt was not ours, IRQ_HANDLED otherwise.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Ack and mask further interrupts until NAPI completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index being serviced so the next interrupt
	 * can be recognized as new work.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3344
3345 static inline int
3346 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3347 {
3348         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3349         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3350
3351         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3352             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3353                 return 1;
3354         return 0;
3355 }
3356
3357 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3358                                  STATUS_ATTN_BITS_TIMER_ABORT)
3359
3360 static inline int
3361 bnx2_has_work(struct bnx2_napi *bnapi)
3362 {
3363         struct status_block *sblk = bnapi->status_blk.msi;
3364
3365         if (bnx2_has_fast_work(bnapi))
3366                 return 1;
3367
3368 #ifdef BCM_CNIC
3369         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3370                 return 1;
3371 #endif
3372
3373         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3374             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3375                 return 1;
3376
3377         return 0;
3378 }
3379
/* Work around a possibly missed MSI.  If work has been pending with
 * no status block progress since the previous invocation, pulse the
 * MSI enable bit and service the interrupt by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
        struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
        u32 msi_ctrl;

        if (bnx2_has_work(bnapi)) {
                msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
                /* Nothing to do if MSI is not currently enabled. */
                if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
                        return;

                /* No progress since the last check: bounce MSI enable
                 * off/on, then invoke the MSI handler directly.
                 */
                if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
                               ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
                        REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
                        bnx2_msi(bp->irq_tbl[0].vector, bnapi);
                }
        }

        /* Remember where we were for the next check. */
        bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3401
#ifdef BCM_CNIC
/* Hand the status block to the registered CNIC driver so it can
 * process its own events; the returned tag is later compared against
 * status_idx by bnx2_has_work() to detect pending CNIC work.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct cnic_ops *c_ops;

        if (!bnapi->cnic_present)
                return;

        /* cnic_ops is RCU-protected and may be NULL. */
        rcu_read_lock();
        c_ops = rcu_dereference(bp->cnic_ops);
        if (c_ops)
                bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
                                                      bnapi->status_blk.msi);
        rcu_read_unlock();
}
#endif
3418
/* Service link/timer attention events:  when the raised attention
 * bits differ from the acknowledged ones, run the PHY interrupt
 * handler and force a coalesce-now cycle.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 status_attn_bits = sblk->status_attn_bits;
        u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

        if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
            (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

                bnx2_phy_int(bp, bnapi);

                /* This is needed to take care of transient status
                 * during link changes.
                 */
                REG_WR(bp, BNX2_HC_COMMAND,
                       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
                /* Read back flushes the posted write. */
                REG_RD(bp, BNX2_HC_COMMAND);
        }
}
3438
3439 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3440                           int work_done, int budget)
3441 {
3442         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3443         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3444
3445         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3446                 bnx2_tx_int(bp, bnapi, 0);
3447
3448         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3449                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3450
3451         return work_done;
3452 }
3453
/* NAPI poll for an MSI-X vector:  handles only fast-path TX/RX work;
 * link attention and CNIC events are handled by bnx2_poll() below.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block_msix *sblk = bnapi->status_blk.msix;

        while (1) {
                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
                if (unlikely(work_done >= budget))
                        break;

                bnapi->last_status_idx = sblk->status_idx;
                /* status idx must be read before checking for more work. */
                rmb();
                if (likely(!bnx2_has_fast_work(bnapi))) {

                        napi_complete(napi);
                        /* Ack the processed index for this vector and
                         * re-arm its interrupt.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }
        return work_done;
}
3480
/* NAPI poll for the default (vector 0) ring.  Besides TX/RX work
 * this also handles link attention and, when built in, CNIC events.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
        struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
        struct bnx2 *bp = bnapi->bp;
        int work_done = 0;
        struct status_block *sblk = bnapi->status_blk.msi;

        while (1) {
                bnx2_poll_link(bp, bnapi);

                work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
                bnx2_poll_cnic(bp, bnapi);
#endif

                /* bnapi->last_status_idx is used below to tell the hw how
                 * much work has been processed, so we must read it before
                 * checking for more work.
                 */
                bnapi->last_status_idx = sblk->status_idx;

                if (unlikely(work_done >= budget))
                        break;

                rmb();
                if (likely(!bnx2_has_work(bnapi))) {
                        napi_complete(napi);
                        if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
                                REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                                       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                                       bnapi->last_status_idx);
                                break;
                        }
                        /* INTx: two writes - first update the index with
                         * interrupts still masked, then clear the mask bit.
                         */
                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
                               bnapi->last_status_idx);

                        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                               BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
                               bnapi->last_status_idx);
                        break;
                }
        }

        return work_done;
}
3529
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.  Programs the promiscuous / multicast /
 * unicast RX filtering state into the EMAC and RPM sort registers.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        u32 rx_mode, sort_mode;
        struct netdev_hw_addr *ha;
        int i;

        if (!netif_running(dev))
                return;

        spin_lock_bh(&bp->phy_lock);

        /* Start from the current mode with the bits we manage cleared. */
        rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
                                  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
        sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
        /* Keep the VLAN tag in the RX buffer only when no vlan group
         * is registered and the chip supports it.
         */
        if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
        if (dev->flags & IFF_PROMISC) {
                /* Promiscuous mode. */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        }
        else if (dev->flags & IFF_ALLMULTI) {
                /* Accept all multicast: set every hash filter bit. */
                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               0xffffffff);
                }
                sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
        }
        else {
                /* Accept one or more multicast(s). */
                u32 mc_filter[NUM_MC_HASH_REGISTERS];
                u32 regidx;
                u32 bit;
                u32 crc;

                memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

                netdev_for_each_mc_addr(ha, dev) {
                        /* Map each address to one bit of the multicast
                         * hash registers via the low byte of its CRC.
                         */
                        crc = ether_crc_le(ETH_ALEN, ha->addr);
                        bit = crc & 0xff;
                        regidx = (bit & 0xe0) >> 5;
                        bit &= 0x1f;
                        mc_filter[regidx] |= (1 << bit);
                }

                for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                        REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                               mc_filter[i]);
                }

                sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
        }

        if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
                /* More unicast addresses than match filters; fall back
                 * to promiscuous mode.
                 */
                rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
                sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
                             BNX2_RPM_SORT_USER0_PROM_VLAN;
        } else if (!(dev->flags & IFF_PROMISC)) {
                /* Add all entries into the match filter list */
                i = 0;
                netdev_for_each_uc_addr(ha, dev) {
                        bnx2_set_mac_addr(bp, ha->addr,
                                          i + BNX2_START_UNICAST_ADDRESS_INDEX);
                        sort_mode |= (1 <<
                                      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
                        i++;
                }

        }

        if (rx_mode != bp->rx_mode) {
                bp->rx_mode = rx_mode;
                REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
        }

        /* Disable, reprogram, then re-enable the sort logic. */
        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
        REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

        spin_unlock_bh(&bp->phy_lock);
}
3622
3623 static int __devinit
3624 check_fw_section(const struct firmware *fw,
3625                  const struct bnx2_fw_file_section *section,
3626                  u32 alignment, bool non_empty)
3627 {
3628         u32 offset = be32_to_cpu(section->offset);
3629         u32 len = be32_to_cpu(section->len);
3630
3631         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3632                 return -EINVAL;
3633         if ((non_empty && len == 0) || len > fw->size - offset ||
3634             len & (alignment - 1))
3635                 return -EINVAL;
3636         return 0;
3637 }
3638
3639 static int __devinit
3640 check_mips_fw_entry(const struct firmware *fw,
3641                     const struct bnx2_mips_fw_file_entry *entry)
3642 {
3643         if (check_fw_section(fw, &entry->text, 4, true) ||
3644             check_fw_section(fw, &entry->data, 4, false) ||
3645             check_fw_section(fw, &entry->rodata, 4, false))
3646                 return -EINVAL;
3647         return 0;
3648 }
3649
/* Select, load, and validate the MIPS and RV2P firmware images for
 * this chip revision.  Every section header is sanity-checked before
 * the images are trusted.  Returns 0 on success or a negative errno.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
        const char *mips_fw_file, *rv2p_fw_file;
        const struct bnx2_mips_fw_file *mips_fw;
        const struct bnx2_rv2p_fw_file *rv2p_fw;
        int rc;

        /* 5709 uses its own images; A0/A1 steppings need a dedicated
         * RV2P image.
         */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                mips_fw_file = FW_MIPS_FILE_09;
                if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5709_A1))
                        rv2p_fw_file = FW_RV2P_FILE_09_Ax;
                else
                        rv2p_fw_file = FW_RV2P_FILE_09;
        } else {
                mips_fw_file = FW_MIPS_FILE_06;
                rv2p_fw_file = FW_RV2P_FILE_06;
        }

        rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
                return rc;
        }

        rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
        if (rc) {
                pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
                return rc;
        }
        /* Validate every section of both images before use. */
        mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        if (bp->mips_firmware->size < sizeof(*mips_fw) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
            check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
                pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
                return -EINVAL;
        }
        if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
            check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
                pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
                return -EINVAL;
        }

        return 0;
}
3701
3702 static u32
3703 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3704 {
3705         switch (idx) {
3706         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3707                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3708                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3709                 break;
3710         }
3711         return rv2p_code;
3712 }
3713
/* Download one RV2P processor's firmware.  Instructions are 64 bits
 * wide and written as HIGH/LOW 32-bit register pairs, committed one
 * at a time through the per-processor address/command register.
 * Afterwards up to 8 fixup locations from the firmware file are
 * re-written with patched instruction words.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
             const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
        u32 rv2p_code_len, file_offset;
        __be32 *rv2p_code;
        int i;
        u32 val, cmd, addr;

        rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
        file_offset = be32_to_cpu(fw_entry->rv2p.offset);

        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

        /* Each RV2P processor has its own command/address register. */
        if (rv2p_proc == RV2P_PROC1) {
                cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC1_ADDR_CMD;
        } else {
                cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
                addr = BNX2_RV2P_PROC2_ADDR_CMD;
        }

        /* Write each 8-byte instruction, committing it at index i/8. */
        for (i = 0; i < rv2p_code_len; i += 8) {
                REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
                rv2p_code++;
                REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
                rv2p_code++;

                val = (i / 8) | cmd;
                REG_WR(bp, addr, val);
        }

        /* Apply the fixup table:  loc indexes 32-bit words, so the
         * instruction containing word 'loc' starts at word loc - 1
         * and lives at instruction index loc / 2.
         */
        rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
        for (i = 0; i < 8; i++) {
                u32 loc, code;

                loc = be32_to_cpu(fw_entry->fixup[i]);
                if (loc && ((loc * 4) < rv2p_code_len)) {
                        code = be32_to_cpu(*(rv2p_code + loc - 1));
                        REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
                        code = be32_to_cpu(*(rv2p_code + loc));
                        code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
                        REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

                        val = (loc / 2) | cmd;
                        REG_WR(bp, addr, val);
                }
        }

        /* Reset the processor, un-stall is done later. */
        if (rv2p_proc == RV2P_PROC1) {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
        }
        else {
                REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
        }

        return 0;
}
3773
3774 static int
3775 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3776             const struct bnx2_mips_fw_file_entry *fw_entry)
3777 {
3778         u32 addr, len, file_offset;
3779         __be32 *data;
3780         u32 offset;
3781         u32 val;
3782
3783         /* Halt the CPU. */
3784         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3785         val |= cpu_reg->mode_value_halt;
3786         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3787         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3788
3789         /* Load the Text area. */
3790         addr = be32_to_cpu(fw_entry->text.addr);
3791         len = be32_to_cpu(fw_entry->text.len);
3792         file_offset = be32_to_cpu(fw_entry->text.offset);
3793         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3794
3795         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3796         if (len) {
3797                 int j;
3798
3799                 for (j = 0; j < (len / 4); j++, offset += 4)
3800                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3801         }
3802
3803         /* Load the Data area. */
3804         addr = be32_to_cpu(fw_entry->data.addr);
3805         len = be32_to_cpu(fw_entry->data.len);
3806         file_offset = be32_to_cpu(fw_entry->data.offset);
3807         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3808
3809         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3810         if (len) {
3811                 int j;
3812
3813                 for (j = 0; j < (len / 4); j++, offset += 4)
3814                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3815         }
3816
3817         /* Load the Read-Only area. */
3818         addr = be32_to_cpu(fw_entry->rodata.addr);
3819         len = be32_to_cpu(fw_entry->rodata.len);
3820         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3821         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3822
3823         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3824         if (len) {
3825                 int j;
3826
3827                 for (j = 0; j < (len / 4); j++, offset += 4)
3828                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3829         }
3830
3831         /* Clear the pre-fetch instruction. */
3832         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3833
3834         val = be32_to_cpu(fw_entry->start_addr);
3835         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3836
3837         /* Start the CPU. */
3838         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3839         val &= ~cpu_reg->mode_value_halt;
3840         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3841         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3842
3843         return 0;
3844 }
3845
/* Download and start all on-chip firmware:  both RV2P processors
 * and the five MIPS CPUs (RX, TX, TX patch-up, completion, command).
 * Returns 0 on success or the first failing load's error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
        const struct bnx2_mips_fw_file *mips_fw =
                (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
        const struct bnx2_rv2p_fw_file *rv2p_fw =
                (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
        int rc;

        /* Initialize the RV2P processor. */
        load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
        load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

        /* Initialize the RX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
        if (rc)
                goto init_cpu_err;

        /* Initialize the TX Patch-up Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Completion Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
        if (rc)
                goto init_cpu_err;

        /* Initialize the Command Processor. */
        rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
        return rc;
}
3885
/* Transition the chip between D0 (running) and D3hot (suspended),
 * optionally arming Wake-on-LAN before entering D3hot.  Any other
 * requested state returns -EINVAL.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
        u16 pmcsr;

        pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

        switch (state) {
        case PCI_D0: {
                u32 val;

                /* Clear the power state field (back to D0) and the
                 * PME status bit.
                 */
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                        (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
                        PCI_PM_CTRL_PME_STATUS);

                if (pmcsr & PCI_PM_CTRL_STATE_MASK)
                        /* delay required during transition out of D3hot */
                        msleep(20);

                /* Ack wakeup packets and disable magic-packet mode. */
                val = REG_RD(bp, BNX2_EMAC_MODE);
                val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
                val &= ~BNX2_EMAC_MODE_MPKT;
                REG_WR(bp, BNX2_EMAC_MODE, val);

                val = REG_RD(bp, BNX2_RPM_CONFIG);
                val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                REG_WR(bp, BNX2_RPM_CONFIG, val);
                break;
        }
        case PCI_D3hot: {
                int i;
                u32 val, wol_msg;

                if (bp->wol) {
                        u32 advertising;
                        u8 autoneg;

                        /* On copper, renegotiate at 10/100 for the
                         * suspended link; the configured settings are
                         * restored below after bnx2_setup_phy().
                         */
                        autoneg = bp->autoneg;
                        advertising = bp->advertising;

                        if (bp->phy_port == PORT_TP) {
                                bp->autoneg = AUTONEG_SPEED;
                                bp->advertising = ADVERTISED_10baseT_Half |
                                        ADVERTISED_10baseT_Full |
                                        ADVERTISED_100baseT_Half |
                                        ADVERTISED_100baseT_Full |
                                        ADVERTISED_Autoneg;
                        }

                        spin_lock_bh(&bp->phy_lock);
                        bnx2_setup_phy(bp, bp->phy_port);
                        spin_unlock_bh(&bp->phy_lock);

                        bp->autoneg = autoneg;
                        bp->advertising = advertising;

                        bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

                        val = REG_RD(bp, BNX2_EMAC_MODE);

                        /* Enable port mode. */
                        val &= ~BNX2_EMAC_MODE_PORT;
                        val |= BNX2_EMAC_MODE_MPKT_RCVD |
                               BNX2_EMAC_MODE_ACPI_RCVD |
                               BNX2_EMAC_MODE_MPKT;
                        if (bp->phy_port == PORT_TP)
                                val |= BNX2_EMAC_MODE_PORT_MII;
                        else {
                                val |= BNX2_EMAC_MODE_PORT_GMII;
                                if (bp->line_speed == SPEED_2500)
                                        val |= BNX2_EMAC_MODE_25G_MODE;
                        }

                        REG_WR(bp, BNX2_EMAC_MODE, val);

                        /* receive all multicast */
                        for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
                                REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
                                       0xffffffff);
                        }
                        REG_WR(bp, BNX2_EMAC_RX_MODE,
                               BNX2_EMAC_RX_MODE_SORT_MODE);

                        /* Sort on broadcast and multicast only. */
                        val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
                              BNX2_RPM_SORT_USER0_MC_EN;
                        REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val);
                        REG_WR(bp, BNX2_RPM_SORT_USER0, val |
                               BNX2_RPM_SORT_USER0_ENA);

                        /* Need to enable EMAC and RPM for WOL. */
                        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                               BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
                               BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

                        val = REG_RD(bp, BNX2_RPM_CONFIG);
                        val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
                        REG_WR(bp, BNX2_RPM_CONFIG, val);

                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
                }
                else {
                        wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
                }

                /* Tell the bootcode firmware we are suspending. */
                if (!(bp->flags & BNX2_FLAG_NO_WOL))
                        bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
                                     1, 0);

                pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
                if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
                    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

                        /* 5706 A0/A1 only enter D3hot when WOL is armed. */
                        if (bp->wol)
                                pmcsr |= 3;
                }
                else {
                        pmcsr |= 3;     /* PM_CTRL state field value for D3hot */
                }
                if (bp->wol) {
                        pmcsr |= PCI_PM_CTRL_PME_ENABLE;
                }
                pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
                                      pmcsr);

                /* No more memory access after this point until
                 * device is brought back to D0.
                 */
                udelay(50);
                break;
        }
        default:
                return -EINVAL;
        }
        return 0;
}
4023
4024 static int
4025 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4026 {
4027         u32 val;
4028         int j;
4029
4030         /* Request access to the flash interface. */
4031         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4032         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4033                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4034                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4035                         break;
4036
4037                 udelay(5);
4038         }
4039
4040         if (j >= NVRAM_TIMEOUT_COUNT)
4041                 return -EBUSY;
4042
4043         return 0;
4044 }
4045
4046 static int
4047 bnx2_release_nvram_lock(struct bnx2 *bp)
4048 {
4049         int j;
4050         u32 val;
4051
4052         /* Relinquish nvram interface. */
4053         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4054
4055         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4056                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4057                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4058                         break;
4059
4060                 udelay(5);
4061         }
4062
4063         if (j >= NVRAM_TIMEOUT_COUNT)
4064                 return -EBUSY;
4065
4066         return 0;
4067 }
4068
4069
4070 static int
4071 bnx2_enable_nvram_write(struct bnx2 *bp)
4072 {
4073         u32 val;
4074
4075         val = REG_RD(bp, BNX2_MISC_CFG);
4076         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4077
4078         if (bp->flash_info->flags & BNX2_NV_WREN) {
4079                 int j;
4080
4081                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4082                 REG_WR(bp, BNX2_NVM_COMMAND,
4083                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4084
4085                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4086                         udelay(5);
4087
4088                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4089                         if (val & BNX2_NVM_COMMAND_DONE)
4090                                 break;
4091                 }
4092
4093                 if (j >= NVRAM_TIMEOUT_COUNT)
4094                         return -EBUSY;
4095         }
4096         return 0;
4097 }
4098
4099 static void
4100 bnx2_disable_nvram_write(struct bnx2 *bp)
4101 {
4102         u32 val;
4103
4104         val = REG_RD(bp, BNX2_MISC_CFG);
4105         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4106 }
4107
4108
4109 static void
4110 bnx2_enable_nvram_access(struct bnx2 *bp)
4111 {
4112         u32 val;
4113
4114         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4115         /* Enable both bits, even on read. */
4116         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4117                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4118 }
4119
4120 static void
4121 bnx2_disable_nvram_access(struct bnx2 *bp)
4122 {
4123         u32 val;
4124
4125         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4126         /* Disable both bits, even after read. */
4127         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4128                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4129                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4130 }
4131
4132 static int
4133 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4134 {
4135         u32 cmd;
4136         int j;
4137
4138         if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4139                 /* Buffered flash, no erase needed */
4140                 return 0;
4141
4142         /* Build an erase command */
4143         cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4144               BNX2_NVM_COMMAND_DOIT;
4145
4146         /* Need to clear DONE bit separately. */
4147         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4148
4149         /* Address of the NVRAM to read from. */
4150         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4151
4152         /* Issue an erase command. */
4153         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4154
4155         /* Wait for completion. */
4156         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4157                 u32 val;
4158
4159                 udelay(5);
4160
4161                 val = REG_RD(bp, BNX2_NVM_COMMAND);
4162                 if (val & BNX2_NVM_COMMAND_DONE)
4163                         break;
4164         }
4165
4166         if (j >= NVRAM_TIMEOUT_COUNT)
4167                 return -EBUSY;
4168
4169         return 0;
4170 }
4171
/* Read one aligned 32-bit word of NVRAM at @offset into @ret_val (4 bytes).
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word bursts.  Returns 0 on success, -EBUSY if the controller
 * does not signal DONE within NVRAM_TIMEOUT_COUNT polls.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709.
	 * Buffered parts address by (page number << page_bits) plus the
	 * byte offset within the page. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Store the word big-endian so the caller sees
			 * the raw NVRAM byte order regardless of host
			 * endianness. */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4215
4216
4217 static int
4218 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4219 {
4220         u32 cmd;
4221         __be32 val32;
4222         int j;
4223
4224         /* Build the command word. */
4225         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4226
4227         /* Calculate an offset of a buffered flash, not needed for 5709. */
4228         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4229                 offset = ((offset / bp->flash_info->page_size) <<
4230                           bp->flash_info->page_bits) +
4231                          (offset % bp->flash_info->page_size);
4232         }
4233
4234         /* Need to clear DONE bit separately. */
4235         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4236
4237         memcpy(&val32, val, 4);
4238
4239         /* Write the data. */
4240         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4241
4242         /* Address of the NVRAM to write to. */
4243         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4244
4245         /* Issue the write command. */
4246         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4247
4248         /* Wait for completion. */
4249         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4250                 udelay(5);
4251
4252                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4253                         break;
4254         }
4255         if (j >= NVRAM_TIMEOUT_COUNT)
4256                 return -EBUSY;
4257
4258         return 0;
4259 }
4260
/* Identify the attached NVRAM part and record it in bp->flash_info,
 * then determine the usable flash size.
 *
 * 5709: there is a single known part (flash_5709), no probing needed.
 * Older chips: match the NVM_CFG1 strapping bits against flash_table[].
 * If the flash interface was already reconfigured (bit 30 set), match
 * on the backup-strap bits only; otherwise match on the strap bits and
 * program the matched entry's config registers into the chip under the
 * NVRAM lock.
 *
 * Returns 0 on success, -ENODEV if no table entry matches, or the
 * error from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strapping set to compare. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Either loop falls through with j == entry_count when nothing
	 * matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size the firmware publishes in shared memory;
	 * fall back to the table entry's total_size when it reports 0. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4343
4344 static int
4345 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4346                 int buf_size)
4347 {
4348         int rc = 0;
4349         u32 cmd_flags, offset32, len32, extra;
4350
4351         if (buf_size == 0)
4352                 return 0;
4353
4354         /* Request access to the flash interface. */
4355         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4356                 return rc;
4357
4358         /* Enable access to flash interface */
4359         bnx2_enable_nvram_access(bp);
4360
4361         len32 = buf_size;
4362         offset32 = offset;
4363         extra = 0;
4364
4365         cmd_flags = 0;
4366
4367         if (offset32 & 3) {
4368                 u8 buf[4];
4369                 u32 pre_len;
4370
4371                 offset32 &= ~3;
4372                 pre_len = 4 - (offset & 3);
4373
4374                 if (pre_len >= len32) {
4375                         pre_len = len32;
4376                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4377                                     BNX2_NVM_COMMAND_LAST;
4378                 }
4379                 else {
4380                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4381                 }
4382
4383                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4384
4385                 if (rc)
4386                         return rc;
4387
4388                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4389
4390                 offset32 += 4;
4391                 ret_buf += pre_len;
4392                 len32 -= pre_len;
4393         }
4394         if (len32 & 3) {
4395                 extra = 4 - (len32 & 3);
4396                 len32 = (len32 + 4) & ~3;
4397         }
4398
4399         if (len32 == 4) {
4400                 u8 buf[4];
4401
4402                 if (cmd_flags)
4403                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4404                 else
4405                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4406                                     BNX2_NVM_COMMAND_LAST;
4407
4408                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4409
4410                 memcpy(ret_buf, buf, 4 - extra);
4411         }
4412         else if (len32 > 0) {
4413                 u8 buf[4];
4414
4415                 /* Read the first word. */
4416                 if (cmd_flags)
4417                         cmd_flags = 0;
4418                 else
4419                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4420
4421                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4422
4423                 /* Advance to the next dword. */
4424                 offset32 += 4;
4425                 ret_buf += 4;
4426                 len32 -= 4;
4427
4428                 while (len32 > 4 && rc == 0) {
4429                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4430
4431                         /* Advance to the next dword. */
4432                         offset32 += 4;
4433                         ret_buf += 4;
4434                         len32 -= 4;
4435                 }
4436
4437                 if (rc)
4438                         return rc;
4439
4440                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4441                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4442
4443                 memcpy(ret_buf, buf, 4 - extra);
4444         }
4445
4446         /* Disable access to flash interface */
4447         bnx2_disable_nvram_access(bp);
4448
4449         bnx2_release_nvram_lock(bp);
4450
4451         return rc;
4452 }
4453
/* Write @buf_size bytes from @data_buf to NVRAM at byte @offset.
 *
 * Unaligned head/tail bytes are handled by pre-reading the surrounding
 * dwords into a merged bounce buffer (align_buf).  For non-buffered
 * flash, each affected page is read in full, erased, and rewritten
 * (read-modify-write); buffered parts are written in place.
 *
 * NOTE(review): the error gotos inside the per-page loop (read/erase/
 * write failures) jump to nvram_write_end while the NVRAM lock is still
 * held and flash access is still enabled — looks like a lock leak on
 * error paths; verify against later upstream history before relying on
 * error-path behavior.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: widen the range to the containing dword and
	 * pre-read it so untouched leading bytes are preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: widen to a dword and pre-read the tail. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved head/tail bytes with the caller's data into a
	 * single dword-aligned buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a scratch buffer to hold one whole
	 * page for the read-modify-write cycle (264 bytes covers the
	 * largest supported page). */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* One iteration per flash page touched by the range. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			/* LAST flag on the final dword of the page, or of
			 * the data range for buffered parts. */
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4633
4634 static void
4635 bnx2_init_fw_cap(struct bnx2 *bp)
4636 {
4637         u32 val, sig = 0;
4638
4639         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4640         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4641
4642         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4643                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4644
4645         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4646         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4647                 return;
4648
4649         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4650                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4651                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4652         }
4653
4654         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4655             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4656                 u32 link;
4657
4658                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4659
4660                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4661                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4662                         bp->phy_port = PORT_FIBRE;
4663                 else
4664                         bp->phy_port = PORT_TP;
4665
4666                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4667                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4668         }
4669
4670         if (netif_running(bp->dev) && sig)
4671                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4672 }
4673
/* Map the MSI-X vector table and PBA through GRC windows 2 and 3 so
 * the host can reach them via the register BAR. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	/* Switch the GRC window into separate-window mode first. */
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4682
/* Perform a coordinated soft reset of the chip.
 *
 * @reset_code is the BNX2_DRV_MSG_CODE_* reason passed to the firmware
 * in the WAIT0/WAIT1 handshakes.  Quiesces DMA, handshakes with the
 * bootcode, issues the chip-family-appropriate reset, verifies byte
 * swapping, waits for firmware re-init, and re-applies per-chip
 * workarounds.  Returns 0 or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	/* Read back to flush the posted write before the delay. */
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 resets via the MISC command register; the config
		 * write below restores window/swap settings afterwards. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips reset via the core-reset request bit in
		 * PCICFG_MISC_CONFIG. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* The firmware may have changed the PHY port assignment; refresh
	 * capabilities and re-select the remote link if it moved. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
			BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4792
4793 static int
4794 bnx2_init_chip(struct bnx2 *bp)
4795 {
4796         u32 val, mtu;
4797         int rc, i;
4798
4799         /* Make sure the interrupt is not active. */
4800         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4801
4802         val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4803               BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4804 #ifdef __BIG_ENDIAN
4805               BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4806 #endif
4807               BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4808               DMA_READ_CHANS << 12 |
4809               DMA_WRITE_CHANS << 16;
4810
4811         val |= (0x2 << 20) | (1 << 11);
4812
4813         if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4814                 val |= (1 << 23);
4815
4816         if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4817             (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4818                 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4819
4820         REG_WR(bp, BNX2_DMA_CONFIG, val);
4821
4822         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4823                 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4824                 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4825                 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4826         }
4827
4828         if (bp->flags & BNX2_FLAG_PCIX) {
4829                 u16 val16;
4830
4831                 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4832                                      &val16);
4833                 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4834                                       val16 & ~PCI_X_CMD_ERO);
4835         }
4836
4837         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4838                BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4839                BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4840                BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4841
4842         /* Initialize context mapping and zero out the quick contexts.  The
4843          * context block must have already been enabled. */
4844         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4845                 rc = bnx2_init_5709_context(bp);
4846                 if (rc)
4847                         return rc;
4848         } else
4849                 bnx2_init_context(bp);
4850
4851         if ((rc = bnx2_init_cpus(bp)) != 0)
4852                 return rc;
4853
4854         bnx2_init_nvram(bp);
4855
4856         bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4857
4858         val = REG_RD(bp, BNX2_MQ_CONFIG);
4859         val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4860         val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4861         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4862                 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4863                 if (CHIP_REV(bp) == CHIP_REV_Ax)
4864                         val |= BNX2_MQ_CONFIG_HALT_DIS;
4865         }
4866
4867         REG_WR(bp, BNX2_MQ_CONFIG, val);
4868
4869         val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4870         REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4871         REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4872
4873         val = (BCM_PAGE_BITS - 8) << 24;
4874         REG_WR(bp, BNX2_RV2P_CONFIG, val);
4875
4876         /* Configure page size. */
4877         val = REG_RD(bp, BNX2_TBDR_CONFIG);
4878         val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4879         val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4880         REG_WR(bp, BNX2_TBDR_CONFIG, val);
4881
4882         val = bp->mac_addr[0] +
4883               (bp->mac_addr[1] << 8) +
4884               (bp->mac_addr[2] << 16) +
4885               bp->mac_addr[3] +
4886               (bp->mac_addr[4] << 8) +
4887               (bp->mac_addr[5] << 16);
4888         REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4889
4890         /* Program the MTU.  Also include 4 bytes for CRC32. */
4891         mtu = bp->dev->mtu;
4892         val = mtu + ETH_HLEN + ETH_FCS_LEN;
4893         if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4894                 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4895         REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4896
4897         if (mtu < 1500)
4898                 mtu = 1500;
4899
4900         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4901         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4902         bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4903
4904         memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4905         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4906                 bp->bnx2_napi[i].last_status_idx = 0;
4907
4908         bp->idle_chk_status_idx = 0xffff;
4909
4910         bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4911
4912         /* Set up how to generate a link change interrupt. */
4913         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4914
4915         REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4916                (u64) bp->status_blk_mapping & 0xffffffff);
4917         REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4918
4919         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4920                (u64) bp->stats_blk_mapping & 0xffffffff);
4921         REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4922                (u64) bp->stats_blk_mapping >> 32);
4923
4924         REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4925                (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4926
4927         REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4928                (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4929
4930         REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4931                (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4932
4933         REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4934
4935         REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4936
4937         REG_WR(bp, BNX2_HC_COM_TICKS,
4938                (bp->com_ticks_int << 16) | bp->com_ticks);
4939
4940         REG_WR(bp, BNX2_HC_CMD_TICKS,
4941                (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4942
4943         if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4944                 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4945         else
4946                 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4947         REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */
4948
4949         if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4950                 val = BNX2_HC_CONFIG_COLLECT_STATS;
4951         else {
4952                 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4953                       BNX2_HC_CONFIG_COLLECT_STATS;
4954         }
4955
4956         if (bp->flags & BNX2_FLAG_USING_MSIX) {
4957                 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4958                        BNX2_HC_MSIX_BIT_VECTOR_VAL);
4959
4960                 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4961         }
4962
4963         if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4964                 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4965
4966         REG_WR(bp, BNX2_HC_CONFIG, val);
4967
4968         for (i = 1; i < bp->irq_nvecs; i++) {
4969                 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4970                            BNX2_HC_SB_CONFIG_1;
4971
4972                 REG_WR(bp, base,
4973                         BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4974                         BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4975                         BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4976
4977                 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4978                         (bp->tx_quick_cons_trip_int << 16) |
4979                          bp->tx_quick_cons_trip);
4980
4981                 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4982                         (bp->tx_ticks_int << 16) | bp->tx_ticks);
4983
4984                 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4985                        (bp->rx_quick_cons_trip_int << 16) |
4986                         bp->rx_quick_cons_trip);
4987
4988                 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4989                         (bp->rx_ticks_int << 16) | bp->rx_ticks);
4990         }
4991
4992         /* Clear internal stats counters. */
4993         REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4994
4995         REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4996
4997         /* Initialize the receive filter. */
4998         bnx2_set_rx_mode(bp->dev);
4999
5000         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5001                 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5002                 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5003                 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5004         }
5005         rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5006                           1, 0);
5007
5008         REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5009         REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5010
5011         udelay(20);
5012
5013         bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5014
5015         return rc;
5016 }
5017
5018 static void
5019 bnx2_clear_ring_states(struct bnx2 *bp)
5020 {
5021         struct bnx2_napi *bnapi;
5022         struct bnx2_tx_ring_info *txr;
5023         struct bnx2_rx_ring_info *rxr;
5024         int i;
5025
5026         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5027                 bnapi = &bp->bnx2_napi[i];
5028                 txr = &bnapi->tx_ring;
5029                 rxr = &bnapi->rx_ring;
5030
5031                 txr->tx_cons = 0;
5032                 txr->hw_tx_cons = 0;
5033                 rxr->rx_prod_bseq = 0;
5034                 rxr->rx_prod = 0;
5035                 rxr->rx_cons = 0;
5036                 rxr->rx_pg_prod = 0;
5037                 rxr->rx_pg_cons = 0;
5038         }
5039 }
5040
5041 static void
5042 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5043 {
5044         u32 val, offset0, offset1, offset2, offset3;
5045         u32 cid_addr = GET_CID_ADDR(cid);
5046
5047         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5048                 offset0 = BNX2_L2CTX_TYPE_XI;
5049                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5050                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5051                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5052         } else {
5053                 offset0 = BNX2_L2CTX_TYPE;
5054                 offset1 = BNX2_L2CTX_CMD_TYPE;
5055                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5056                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5057         }
5058         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5059         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5060
5061         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5062         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5063
5064         val = (u64) txr->tx_desc_mapping >> 32;
5065         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5066
5067         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5068         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5069 }
5070
5071 static void
5072 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5073 {
5074         struct tx_bd *txbd;
5075         u32 cid = TX_CID;
5076         struct bnx2_napi *bnapi;
5077         struct bnx2_tx_ring_info *txr;
5078
5079         bnapi = &bp->bnx2_napi[ring_num];
5080         txr = &bnapi->tx_ring;
5081
5082         if (ring_num == 0)
5083                 cid = TX_CID;
5084         else
5085                 cid = TX_TSS_CID + ring_num - 1;
5086
5087         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5088
5089         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5090
5091         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5092         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5093
5094         txr->tx_prod = 0;
5095         txr->tx_prod_bseq = 0;
5096
5097         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5098         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5099
5100         bnx2_init_tx_context(bp, cid, txr);
5101 }
5102
5103 static void
5104 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5105                      int num_rings)
5106 {
5107         int i;
5108         struct rx_bd *rxbd;
5109
5110         for (i = 0; i < num_rings; i++) {
5111                 int j;
5112
5113                 rxbd = &rx_ring[i][0];
5114                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5115                         rxbd->rx_bd_len = buf_size;
5116                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5117                 }
5118                 if (i == (num_rings - 1))
5119                         j = 0;
5120                 else
5121                         j = i + 1;
5122                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5123                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5124         }
5125 }
5126
/* Initialize RX ring @ring_num: build the BD rings (plus the optional
 * page ring for jumbo frames), program the hardware RX context,
 * pre-fill the rings with pages/skbs, and publish the initial
 * producer indices through the mailbox registers.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional (RSS) rings follow. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	/* Chain the normal RX BD pages into a circular ring. */
	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Page-buffer size 0 leaves the page ring disabled unless it is
	 * re-programmed in the jumbo path below.
	 */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo frames: set up the page (fragment) BD ring. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Tell the hardware where the first RX BD page lives. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early (with a warning) if page
	 * allocation fails -- the ring still works, just shallower.
	 */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal ring with receive skbs. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used for producer updates at runtime. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5212
/* (Re)build every TX and RX ring and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS when extra TX rings are configured. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table four one-byte entries at a
		 * time; each entry selects one of the non-default RX
		 * rings.  Bytes are packed in memory order into tbl_32
		 * and converted with cpu_to_be32 before the indirect
		 * write of each completed word.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		/* Hash on all IPv4/IPv6 packet types. */
		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5257
5258 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5259 {
5260         u32 max, num_rings = 1;
5261
5262         while (ring_size > MAX_RX_DESC_CNT) {
5263                 ring_size -= MAX_RX_DESC_CNT;
5264                 num_rings++;
5265         }
5266         /* round to next power of 2 */
5267         max = max_size;
5268         while ((max & num_rings) == 0)
5269                 max >>= 1;
5270
5271         if (num_rings != max)
5272                 max <<= 1;
5273
5274         return max;
5275 }
5276
/* Compute all RX buffer and ring sizing parameters for the current
 * MTU, enabling the page (jumbo) ring when a full frame no longer
 * fits in a single page-backed skb.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: data + alignment + shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per frame.  NOTE(review): the "- 40"
		 * presumably discounts IP+TCP header bytes held in the
		 * head buffer -- confirm against the RX path.
		 */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* With a page ring the head buffer only needs to hold
		 * the copy-threshold portion of the frame.
		 */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5315
/* Unmap and free every pending transmit skb on all TX rings.  Each
 * skb occupies one BD for the linear head plus one BD per page
 * fragment, so the index advances across all of them before the skb
 * is freed.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		/* Ring never allocated; nothing to free here. */
		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* Head of packet: linear data mapping. */
			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Unmap each fragment BD that follows the head;
			 * TX_RING_IDX handles index wrap-around.
			 */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5359
5360 static void
5361 bnx2_free_rx_skbs(struct bnx2 *bp)
5362 {
5363         int i;
5364
5365         for (i = 0; i < bp->num_rx_rings; i++) {
5366                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5367                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5368                 int j;
5369
5370                 if (rxr->rx_buf_ring == NULL)
5371                         return;
5372
5373                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5374                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5375                         struct sk_buff *skb = rx_buf->skb;
5376
5377                         if (skb == NULL)
5378                                 continue;
5379
5380                         pci_unmap_single(bp->pdev,
5381                                          dma_unmap_addr(rx_buf, mapping),
5382                                          bp->rx_buf_use_size,
5383                                          PCI_DMA_FROMDEVICE);
5384
5385                         rx_buf->skb = NULL;
5386
5387                         dev_kfree_skb(skb);
5388                 }
5389                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5390                         bnx2_free_rx_page(bp, rxr, j);
5391         }
5392 }
5393
/* Free every TX and RX buffer currently owned by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5400
5401 static int
5402 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5403 {
5404         int rc;
5405
5406         rc = bnx2_reset_chip(bp, reset_code);
5407         bnx2_free_skbs(bp);
5408         if (rc)
5409                 return rc;
5410
5411         if ((rc = bnx2_init_chip(bp)) != 0)
5412                 return rc;
5413
5414         bnx2_init_all_rings(bp);
5415         return 0;
5416 }
5417
5418 static int
5419 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5420 {
5421         int rc;
5422
5423         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5424                 return rc;
5425
5426         spin_lock_bh(&bp->phy_lock);
5427         bnx2_init_phy(bp, reset_phy);
5428         bnx2_set_link(bp);
5429         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5430                 bnx2_remote_phy_event(bp);
5431         spin_unlock_bh(&bp->phy_lock);
5432         return 0;
5433 }
5434
5435 static int
5436 bnx2_shutdown_chip(struct bnx2 *bp)
5437 {
5438         u32 reset_code;
5439
5440         if (bp->flags & BNX2_FLAG_NO_WOL)
5441                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5442         else if (bp->wol)
5443                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5444         else
5445                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5446
5447         return bnx2_reset_chip(bp, reset_code);
5448 }
5449
/* Ethtool register self-test.  For each entry in reg_tbl, verify that
 * writable bits (rw_mask) toggle and read-only bits (ro_mask) hold
 * their value across writes of all-zeros and all-ones.  The original
 * register value is restored in every case.
 * Returns 0 on success, -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	/* { offset, flags, writable-bit mask, read-only-bit mask };
	 * terminated by offset 0xffff.
	 */
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		/* Skip registers that do not exist on the 5709. */
		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all zeros: rw bits must read back 0, ro bits
		 * must be unchanged.
		 */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all ones: rw bits must read back 1, ro bits
		 * must still be unchanged.
		 */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value before reporting failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5620
5621 static int
5622 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5623 {
5624         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5625                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5626         int i;
5627
5628         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5629                 u32 offset;
5630
5631                 for (offset = 0; offset < size; offset += 4) {
5632
5633                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5634
5635                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5636                                 test_pattern[i]) {
5637                                 return -ENODEV;
5638                         }
5639                 }
5640         }
5641         return 0;
5642 }
5643
5644 static int
5645 bnx2_test_memory(struct bnx2 *bp)
5646 {
5647         int ret = 0;
5648         int i;
5649         static struct mem_entry {
5650                 u32   offset;
5651                 u32   len;
5652         } mem_tbl_5706[] = {
5653                 { 0x60000,  0x4000 },
5654                 { 0xa0000,  0x3000 },
5655                 { 0xe0000,  0x4000 },
5656                 { 0x120000, 0x4000 },
5657                 { 0x1a0000, 0x4000 },
5658                 { 0x160000, 0x4000 },
5659                 { 0xffffffff, 0    },
5660         },
5661         mem_tbl_5709[] = {
5662                 { 0x60000,  0x4000 },
5663                 { 0xa0000,  0x3000 },
5664                 { 0xe0000,  0x4000 },
5665                 { 0x120000, 0x4000 },
5666                 { 0x1a0000, 0x4000 },
5667                 { 0xffffffff, 0    },
5668         };
5669         struct mem_entry *mem_tbl;
5670
5671         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5672                 mem_tbl = mem_tbl_5709;
5673         else
5674                 mem_tbl = mem_tbl_5706;
5675
5676         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5677                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5678                         mem_tbl[i].len)) != 0) {
5679                         return ret;
5680                 }
5681         }
5682
5683         return ret;
5684 }
5685
5686 #define BNX2_MAC_LOOPBACK       0
5687 #define BNX2_PHY_LOOPBACK       1
5688
/* Send one self-addressed test packet through the MAC or PHY loopback
 * path and verify it is received intact.  Returns 0 on success (or
 * when PHY loopback is skipped for a remote PHY), -EINVAL for an
 * unknown mode, -ENOMEM/-EIO for setup failures, and -ENODEV when the
 * looped-back frame is missing or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* The test uses vector 0's rings for both TX and RX. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be run through a remote PHY. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: dest MAC = own address, 8 zero bytes,
	 * then a counting byte pattern from offset 14 (past the
	 * Ethernet header) to the end.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalescing pass so the RX consumer index is current
	 * before the packet is sent.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Queue a single TX BD describing the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the doorbell: publish producer index and byte sequence. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another coalescing pass to pick up the completion. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The TX BD must have been consumed... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the hardware flagged as errored. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match, minus the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte pattern survived the loop. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5820
/* Bitmask values returned by bnx2_test_loopback() identifying which
 * loopback self-test(s) failed; 0 means both passed.
 */
#define BNX2_MAC_LOOPBACK_FAILED        1
#define BNX2_PHY_LOOPBACK_FAILED        2
#define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
                                         BNX2_PHY_LOOPBACK_FAILED)
5825
5826 static int
5827 bnx2_test_loopback(struct bnx2 *bp)
5828 {
5829         int rc = 0;
5830
5831         if (!netif_running(bp->dev))
5832                 return BNX2_LOOPBACK_FAILED;
5833
5834         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5835         spin_lock_bh(&bp->phy_lock);
5836         bnx2_init_phy(bp, 1);
5837         spin_unlock_bh(&bp->phy_lock);
5838         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5839                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5840         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5841                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5842         return rc;
5843 }
5844
/* Number of NVRAM bytes read and CRC-checked by bnx2_test_nvram(). */
#define NVRAM_SIZE 0x200
/* Expected CRC32 residual when the checksummed data includes its own
 * little-endian CRC at the end.
 */
#define CRC32_RESIDUAL 0xdebb20e3
5847
5848 static int
5849 bnx2_test_nvram(struct bnx2 *bp)
5850 {
5851         __be32 buf[NVRAM_SIZE / 4];
5852         u8 *data = (u8 *) buf;
5853         int rc = 0;
5854         u32 magic, csum;
5855
5856         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5857                 goto test_nvram_done;
5858
5859         magic = be32_to_cpu(buf[0]);
5860         if (magic != 0x669955aa) {
5861                 rc = -ENODEV;
5862                 goto test_nvram_done;
5863         }
5864
5865         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5866                 goto test_nvram_done;
5867
5868         csum = ether_crc_le(0x100, data);
5869         if (csum != CRC32_RESIDUAL) {
5870                 rc = -ENODEV;
5871                 goto test_nvram_done;
5872         }
5873
5874         csum = ether_crc_le(0x100, data + 0x100);
5875         if (csum != CRC32_RESIDUAL) {
5876                 rc = -ENODEV;
5877         }
5878
5879 test_nvram_done:
5880         return rc;
5881 }
5882
5883 static int
5884 bnx2_test_link(struct bnx2 *bp)
5885 {
5886         u32 bmsr;
5887
5888         if (!netif_running(bp->dev))
5889                 return -ENODEV;
5890
5891         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5892                 if (bp->link_up)
5893                         return 0;
5894                 return -ENODEV;
5895         }
5896         spin_lock_bh(&bp->phy_lock);
5897         bnx2_enable_bmsr1(bp);
5898         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5899         bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5900         bnx2_disable_bmsr1(bp);
5901         spin_unlock_bh(&bp->phy_lock);
5902
5903         if (bmsr & BMSR_LSTATUS) {
5904                 return 0;
5905         }
5906         return -ENODEV;
5907 }
5908
5909 static int
5910 bnx2_test_intr(struct bnx2 *bp)
5911 {
5912         int i;
5913         u16 status_idx;
5914
5915         if (!netif_running(bp->dev))
5916                 return -ENODEV;
5917
5918         status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5919
5920         /* This register is not touched during run-time. */
5921         REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5922         REG_RD(bp, BNX2_HC_COMMAND);
5923
5924         for (i = 0; i < 10; i++) {
5925                 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5926                         status_idx) {
5927
5928                         break;
5929                 }
5930
5931                 msleep_interruptible(10);
5932         }
5933         if (i < 10)
5934                 return 0;
5935
5936         return -ENODEV;
5937 }
5938
/* Determining link for parallel detection. */
/* Probe the 5706 SerDes PHY shadow/expansion registers to decide whether
 * a parallel-detected link is present.  Returns 1 when signal is
 * detected, autoneg debug shows sync without invalid RUDI, and the link
 * partner is not sending CONFIG; 0 otherwise.
 *
 * Caller context: invoked from the serdes timer with bp->phy_lock held.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        /* Parallel detection explicitly disabled for this PHY. */
        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* Select the MODE_CTL shadow register, then read it back. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        /* No signal detect -> no link possible. */
        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        /* Read twice — presumably to clear latched bits so the second
         * read reflects current autoneg debug state; keep both reads.
         */
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        /* Expansion register 1 via the DSP address/data port pair. */
        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5970
/* Periodic SerDes link maintenance for the 5706.
 *
 * Implements parallel detection: when autoneg has not produced a link,
 * force 1G full duplex if the peer appears to be up without autoneg;
 * when a forced (parallel-detected) link later sees the peer autoneg,
 * re-enable autoneg.  Also forces the link down on loss of sync.
 * Runs from bnx2_timer(); takes bp->phy_lock.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* Still waiting out an autoneg grace period; skip the
                 * link check this tick.
                 */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Autoneg enabled but no link: if the probe says
                         * the peer is up, fall back to forced 1G/FD
                         * (parallel detection).
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* Link came up via parallel detection; check whether the
                 * partner has now started autoneg (magic reg 0x17/0x15
                 * sequence) and, if so, switch back to autoneg.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Double read of the AN debug shadow register, same
                 * pattern as bnx2_5706_serdes_has_link().
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Lost sync while link believed up: force the
                         * link down once, then let bnx2_set_link()
                         * re-evaluate on subsequent ticks.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
6032
/* Periodic SerDes link maintenance for the 5708.
 *
 * When autoneg has not brought the link up, alternate between forced
 * 2.5G and autoneg (with a two-tick grace period) to find a link
 * partner that does not autoneg.  No-op for remote-PHY boards; for
 * non-2.5G-capable PHYs only clears the pending counter.
 * Runs from bnx2_timer(); takes bp->phy_lock.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                /* Grace period after switching back to autoneg. */
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        /* Try forced 2.5G; use the shorter forced-mode
                         * timer interval while in this state.
                         */
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        /* Forced 2.5G did not link; go back to autoneg
                         * and wait two ticks before re-evaluating.
                         */
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
6065
/* Main periodic driver timer.
 *
 * Sends the firmware heartbeat, refreshes the firmware RX drop counter,
 * applies the broken-statistics workaround, and drives the per-chip
 * SerDes state machines.  Re-arms itself at bp->current_interval; does
 * nothing (and does not re-arm) once the interface is down.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupts currently disabled (intr_sem held); just re-arm. */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Plain MSI (not one-shot) can lose edges; check for a missed one. */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        /* Firmware-maintained drop count lives in indirect register space;
         * mirror it into the stats block.
         */
        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6101
6102 static int
6103 bnx2_request_irq(struct bnx2 *bp)
6104 {
6105         unsigned long flags;
6106         struct bnx2_irq *irq;
6107         int rc = 0, i;
6108
6109         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6110                 flags = 0;
6111         else
6112                 flags = IRQF_SHARED;
6113
6114         for (i = 0; i < bp->irq_nvecs; i++) {
6115                 irq = &bp->irq_tbl[i];
6116                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6117                                  &bp->bnx2_napi[i]);
6118                 if (rc)
6119                         break;
6120                 irq->requested = 1;
6121         }
6122         return rc;
6123 }
6124
6125 static void
6126 bnx2_free_irq(struct bnx2 *bp)
6127 {
6128         struct bnx2_irq *irq;
6129         int i;
6130
6131         for (i = 0; i < bp->irq_nvecs; i++) {
6132                 irq = &bp->irq_tbl[i];
6133                 if (irq->requested)
6134                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6135                 irq->requested = 0;
6136         }
6137         if (bp->flags & BNX2_FLAG_USING_MSI)
6138                 pci_disable_msi(bp->pdev);
6139         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6140                 pci_disable_msix(bp->pdev);
6141
6142         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6143 }
6144
/* Program the chip's MSI-X table windows and try to enable MSI-X.
 *
 * On success sets BNX2_FLAG_USING_MSIX / BNX2_FLAG_ONE_SHOT_MSI and
 * fills bp->irq_tbl with per-vector handlers and names.  On any
 * pci_enable_msix() failure it returns silently, leaving the INTx
 * setup from bnx2_setup_int_mode() in place.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);

        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        /*  Need to flush the previous three writes to ensure MSI-X
         *  is setup properly */
        REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;
        }

        /* All-or-nothing request for the full hardware vector count;
         * any failure (including "fewer available") falls back to INTx.
         */
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                bp->irq_tbl[i].vector = msix_ent[i].vector;
                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
                bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }
}
6179
/* Choose the interrupt mode (MSI-X > MSI > INTx) and size the TX/RX
 * ring counts from the number of vectors obtained.
 *
 * @dis_msi: non-zero forces legacy INTx (used by the MSI self-test
 *           fallback in bnx2_open()).
 * Always initializes irq_tbl[0] for INTx first so the MSI/MSI-X paths
 * only have to override it on success.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        /* One vector per CPU plus one, capped by the RX ring limit. */
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* Default: single legacy INTx vector. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        /* MSI-X only pays off with more than one CPU. */
        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
                bnx2_enable_msix(bp, msix_vecs);

        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 supports one-shot MSI. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* TX ring count must be a power of two. */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
6213
/* Called with rtnl_lock */
/* ndo_open: bring the device up.
 *
 * Sequence: power up, pick interrupt mode, enable NAPI, allocate ring
 * memory, request IRQs, init the NIC, start the timer, enable
 * interrupts.  If plain MSI was selected, verify interrupt delivery
 * and fall back to INTx when the test fails.  On any error, unwinds
 * everything via open_err and returns the error code.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_init_napi(bp);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

                        /* Tear down the MSI setup and redo IRQ setup and
                         * NIC init in forced-INTx mode (dis_msi = 1).
                         */
                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                /* Timer was already started above; stop it
                                 * before unwinding.
                                 */
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                netdev_info(dev, "using MSI\n");
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                netdev_info(dev, "using MSIX\n");

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        /* Unwind in reverse order of setup; each helper tolerates
         * partially-completed setup.
         */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        return rc;
}
6290
6291 static void
6292 bnx2_reset_task(struct work_struct *work)
6293 {
6294         struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6295
6296         rtnl_lock();
6297         if (!netif_running(bp->dev)) {
6298                 rtnl_unlock();
6299                 return;
6300         }
6301
6302         bnx2_netif_stop(bp, true);
6303
6304         bnx2_init_nic(bp, 1);
6305
6306         atomic_set(&bp->intr_sem, 1);
6307         bnx2_netif_start(bp, true);
6308         rtnl_unlock();
6309 }
6310
6311 static void
6312 bnx2_dump_state(struct bnx2 *bp)
6313 {
6314         struct net_device *dev = bp->dev;
6315         u32 mcp_p0, mcp_p1;
6316
6317         netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6318         netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6319                    REG_RD(bp, BNX2_EMAC_TX_STATUS),
6320                    REG_RD(bp, BNX2_EMAC_RX_STATUS));
6321         netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6322                    REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6323         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6324                 mcp_p0 = BNX2_MCP_STATE_P0;
6325                 mcp_p1 = BNX2_MCP_STATE_P1;
6326         } else {
6327                 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6328                 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6329         }
6330         netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6331                    bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6332         netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6333                    REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6334         if (bp->flags & BNX2_FLAG_USING_MSIX)
6335                 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6336                            REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6337 }
6338
6339 static void
6340 bnx2_tx_timeout(struct net_device *dev)
6341 {
6342         struct bnx2 *bp = netdev_priv(dev);
6343
6344         bnx2_dump_state(bp);
6345
6346         /* This allows the netif to be shutdown gracefully before resetting */
6347         schedule_work(&bp->reset_task);
6348 }
6349
6350 #ifdef BCM_VLAN
6351 /* Called with rtnl_lock */
6352 static void
6353 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6354 {
6355         struct bnx2 *bp = netdev_priv(dev);
6356
6357         if (netif_running(dev))
6358                 bnx2_netif_stop(bp, false);
6359
6360         bp->vlgrp = vlgrp;
6361
6362         if (!netif_running(dev))
6363                 return;
6364
6365         bnx2_set_rx_mode(dev);
6366         if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6367                 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6368
6369         bnx2_netif_start(bp, false);
6370 }
6371 #endif
6372
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* ndo_start_xmit: map the skb (head + frags) into TX BDs on the ring
 * selected by the skb's queue mapping, build the VLAN/checksum/LSO
 * flags, and kick the hardware doorbell registers.
 *
 * Returns NETDEV_TX_OK (including on DMA-mapping failure, where the skb
 * is dropped) or NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* The stack should have stopped the queue before the ring could
         * fill; hitting this indicates a flow-control bug.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        /* VLAN tag is carried in the upper 16 bits of the flags word. */
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        if ((mss = skb_shinfo(skb)->gso_size)) {
                /* LSO: encode MSS plus IP/TCP header-length info into the
                 * BD flags; IPv6 additionally encodes the transport offset.
                 */
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* Non-zero offset (extension headers) is
                                 * split across three BD bit fields.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                /* Cannot DMA the head; drop silently per xmit contract. */
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;
        dma_unmap_addr_set(tx_buf, mapping, mapping);

        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One BD per page fragment, after the head BD. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
                        len, PCI_DMA_TODEVICE);
                if (pci_dma_mapping_error(bp->pdev, mapping))
                        goto dma_error;
                dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
                                   mapping);

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        /* Mark the last BD of the packet. */
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Doorbell: publish new producer index and byte sequence. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                /* Re-check after stopping: bnx2_tx_int() may have freed
                 * descriptors in between, in which case wake immediately.
                 */
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
dma_error:
        /* save value of frag that failed */
        last_frag = i;

        /* start back at beginning and unmap skb */
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);
        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = NULL;
        pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                         skb_headlen(skb), PCI_DMA_TODEVICE);

        /* unmap remaining mapped pages */
        for (i = 0; i < last_frag; i++) {
                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                tx_buf = &txr->tx_buf_ring[ring_prod];
                pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
                               skb_shinfo(skb)->frags[i].size,
                               PCI_DMA_TODEVICE);
        }

        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}
6539
/* Called with rtnl_lock */
/* ndo_stop: bring the device down.
 *
 * Order matters: cancel any pending reset work first, then quiesce
 * interrupts/NAPI/timer before shutting the chip down and freeing
 * IRQs, buffers, and ring memory.  Finishes by dropping to D3hot.
 * Always returns 0.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bnx2_del_napi(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6561
6562 static void
6563 bnx2_save_stats(struct bnx2 *bp)
6564 {
6565         u32 *hw_stats = (u32 *) bp->stats_blk;
6566         u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6567         int i;
6568
6569         /* The 1st 10 counters are 64-bit counters */
6570         for (i = 0; i < 20; i += 2) {
6571                 u32 hi;
6572                 u64 lo;
6573
6574                 hi = temp_stats[i] + hw_stats[i];
6575                 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6576                 if (lo > 0xffffffff)
6577                         hi++;
6578                 temp_stats[i] = hi;
6579                 temp_stats[i + 1] = lo & 0xffffffff;
6580         }
6581
6582         for ( ; i < sizeof(struct statistics_block) / 4; i++)
6583                 temp_stats[i] += hw_stats[i];
6584 }
6585
/* Combine a hi/lo counter pair into one unsigned long.  Only used on
 * 64-bit builds (see the BITS_PER_LONG guard below), where the 32-bit
 * shift is well-defined for unsigned long.
 */
#define GET_64BIT_NET_STATS64(ctr)                              \
        (unsigned long) ((unsigned long) (ctr##_hi) << 32) +    \
        (unsigned long) (ctr##_lo)

/* 32-bit builds truncate 64-bit counters to their low word. */
#define GET_64BIT_NET_STATS32(ctr)                              \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
#else
#define GET_64BIT_NET_STATS(ctr)                                \
        GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +             \
        GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
#endif

/* 32-bit counters: sum of the live hardware value and the saved
 * accumulator (see bnx2_save_stats()).
 */
#define GET_32BIT_NET_STATS(ctr)                                \
        (unsigned long) (bp->stats_blk->ctr +                   \
                         bp->temp_stats_blk->ctr)
6606
/* ndo_get_stats: translate the chip's statistics block (live values
 * plus the temp accumulator) into struct net_device_stats.
 *
 * Returns the device's stats structure; if the stats block has not
 * been allocated yet, returns it unmodified.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        struct net_device_stats *net_stats = &dev->stats;

        if (bp->stats_blk == NULL) {
                return net_stats;
        }
        net_stats->rx_packets =
                GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);

        net_stats->tx_packets =
                GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
                GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);

        net_stats->rx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCInOctets);

        net_stats->tx_bytes =
                GET_64BIT_NET_STATS(stat_IfHCOutOctets);

        net_stats->multicast =
                GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);

        net_stats->collisions =
                GET_32BIT_NET_STATS(stat_EtherStatsCollisions);

        net_stats->rx_length_errors =
                GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
                GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);

        net_stats->rx_over_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);

        net_stats->rx_frame_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);

        net_stats->rx_crc_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);

        /* rx_errors is the sum of the error categories computed above. */
        net_stats->rx_errors = net_stats->rx_length_errors +
                net_stats->rx_over_errors + net_stats->rx_frame_errors +
                net_stats->rx_crc_errors;

        net_stats->tx_aborted_errors =
                GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
                GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);

        /* Carrier-sense errors are not reported on 5706 or 5708 A0. */
        if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
            (CHIP_ID(bp) == CHIP_ID_5708_A0))
                net_stats->tx_carrier_errors = 0;
        else {
                net_stats->tx_carrier_errors =
                        GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
        }

        net_stats->tx_errors =
                GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
                net_stats->tx_aborted_errors +
                net_stats->tx_carrier_errors;

        net_stats->rx_missed_errors =
                GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
                GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
                GET_32BIT_NET_STATS(stat_FwRxDrop);

        return net_stats;
}
6680
6681 /* All ethtool functions called with rtnl_lock */
6682
6683 static int
6684 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6685 {
6686         struct bnx2 *bp = netdev_priv(dev);
6687         int support_serdes = 0, support_copper = 0;
6688
6689         cmd->supported = SUPPORTED_Autoneg;
6690         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6691                 support_serdes = 1;
6692                 support_copper = 1;
6693         } else if (bp->phy_port == PORT_FIBRE)
6694                 support_serdes = 1;
6695         else
6696                 support_copper = 1;
6697
6698         if (support_serdes) {
6699                 cmd->supported |= SUPPORTED_1000baseT_Full |
6700                         SUPPORTED_FIBRE;
6701                 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6702                         cmd->supported |= SUPPORTED_2500baseX_Full;
6703
6704         }
6705         if (support_copper) {
6706                 cmd->supported |= SUPPORTED_10baseT_Half |
6707                         SUPPORTED_10baseT_Full |
6708                         SUPPORTED_100baseT_Half |
6709                         SUPPORTED_100baseT_Full |
6710                         SUPPORTED_1000baseT_Full |
6711                         SUPPORTED_TP;
6712
6713         }
6714
6715         spin_lock_bh(&bp->phy_lock);
6716         cmd->port = bp->phy_port;
6717         cmd->advertising = bp->advertising;
6718
6719         if (bp->autoneg & AUTONEG_SPEED) {
6720                 cmd->autoneg = AUTONEG_ENABLE;
6721         }
6722         else {
6723                 cmd->autoneg = AUTONEG_DISABLE;
6724         }
6725
6726         if (netif_carrier_ok(dev)) {
6727                 cmd->speed = bp->line_speed;
6728                 cmd->duplex = bp->duplex;
6729         }
6730         else {
6731                 cmd->speed = -1;
6732                 cmd->duplex = -1;
6733         }
6734         spin_unlock_bh(&bp->phy_lock);
6735
6736         cmd->transceiver = XCVR_INTERNAL;
6737         cmd->phy_address = bp->phy_addr;
6738
6739         return 0;
6740 }
6741
/* ethtool set_settings: validate and store the requested autoneg/speed/
 * duplex configuration, and apply it immediately if the device is up.
 * All checks and state updates happen under the PHY lock; any failed
 * validation jumps to err_out_unlock with err == -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	/* Only TP and FIBRE ports exist on this hardware. */
	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching ports is only possible with remote-PHY capability. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		/* Mask the advertisement to what the port type supports;
		 * an empty mask falls back to advertising everything.
		 */
		advertising = cmd->advertising;
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex, and
			 * 2.5G only on chips with that capability.
			 */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		/* Forced gigabit+ is not allowed on copper. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6819
6820 static void
6821 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6822 {
6823         struct bnx2 *bp = netdev_priv(dev);
6824
6825         strcpy(info->driver, DRV_MODULE_NAME);
6826         strcpy(info->version, DRV_MODULE_VERSION);
6827         strcpy(info->bus_info, pci_name(bp->pdev));
6828         strcpy(info->fw_version, bp->fw_version);
6829 }
6830
6831 #define BNX2_REGDUMP_LEN                (32 * 1024)
6832
/* ethtool get_regs_len: fixed size of the register dump buffer that
 * bnx2_get_regs() fills.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
        return BNX2_REGDUMP_LEN;
}
6838
6839 static void
6840 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6841 {
6842         u32 *p = _p, i, offset;
6843         u8 *orig_p = _p;
6844         struct bnx2 *bp = netdev_priv(dev);
6845         u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6846                                  0x0800, 0x0880, 0x0c00, 0x0c10,
6847                                  0x0c30, 0x0d08, 0x1000, 0x101c,
6848                                  0x1040, 0x1048, 0x1080, 0x10a4,
6849                                  0x1400, 0x1490, 0x1498, 0x14f0,
6850                                  0x1500, 0x155c, 0x1580, 0x15dc,
6851                                  0x1600, 0x1658, 0x1680, 0x16d8,
6852                                  0x1800, 0x1820, 0x1840, 0x1854,
6853                                  0x1880, 0x1894, 0x1900, 0x1984,
6854                                  0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6855                                  0x1c80, 0x1c94, 0x1d00, 0x1d84,
6856                                  0x2000, 0x2030, 0x23c0, 0x2400,
6857                                  0x2800, 0x2820, 0x2830, 0x2850,
6858                                  0x2b40, 0x2c10, 0x2fc0, 0x3058,
6859                                  0x3c00, 0x3c94, 0x4000, 0x4010,
6860                                  0x4080, 0x4090, 0x43c0, 0x4458,
6861                                  0x4c00, 0x4c18, 0x4c40, 0x4c54,
6862                                  0x4fc0, 0x5010, 0x53c0, 0x5444,
6863                                  0x5c00, 0x5c18, 0x5c80, 0x5c90,
6864                                  0x5fc0, 0x6000, 0x6400, 0x6428,
6865                                  0x6800, 0x6848, 0x684c, 0x6860,
6866                                  0x6888, 0x6910, 0x8000 };
6867
6868         regs->version = 0;
6869
6870         memset(p, 0, BNX2_REGDUMP_LEN);
6871
6872         if (!netif_running(bp->dev))
6873                 return;
6874
6875         i = 0;
6876         offset = reg_boundaries[0];
6877         p += offset;
6878         while (offset < BNX2_REGDUMP_LEN) {
6879                 *p++ = REG_RD(bp, offset);
6880                 offset += 4;
6881                 if (offset == reg_boundaries[i + 1]) {
6882                         offset = reg_boundaries[i + 2];
6883                         p = (u32 *) (orig_p + offset);
6884                         i += 2;
6885                 }
6886         }
6887 }
6888
6889 static void
6890 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6891 {
6892         struct bnx2 *bp = netdev_priv(dev);
6893
6894         if (bp->flags & BNX2_FLAG_NO_WOL) {
6895                 wol->supported = 0;
6896                 wol->wolopts = 0;
6897         }
6898         else {
6899                 wol->supported = WAKE_MAGIC;
6900                 if (bp->wol)
6901                         wol->wolopts = WAKE_MAGIC;
6902                 else
6903                         wol->wolopts = 0;
6904         }
6905         memset(&wol->sopass, 0, sizeof(wol->sopass));
6906 }
6907
6908 static int
6909 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6910 {
6911         struct bnx2 *bp = netdev_priv(dev);
6912
6913         if (wol->wolopts & ~WAKE_MAGIC)
6914                 return -EINVAL;
6915
6916         if (wol->wolopts & WAKE_MAGIC) {
6917                 if (bp->flags & BNX2_FLAG_NO_WOL)
6918                         return -EINVAL;
6919
6920                 bp->wol = 1;
6921         }
6922         else {
6923                 bp->wol = 0;
6924         }
6925         return 0;
6926 }
6927
/* ethtool nway_reset: restart autonegotiation.  Valid only when the
 * interface is up and autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* With a remote PHY the restart is delegated to firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; re-taken below. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the driver timer that watches for SerDes autoneg
		 * completion/timeout.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6973
6974 static u32
6975 bnx2_get_link(struct net_device *dev)
6976 {
6977         struct bnx2 *bp = netdev_priv(dev);
6978
6979         return bp->link_up;
6980 }
6981
6982 static int
6983 bnx2_get_eeprom_len(struct net_device *dev)
6984 {
6985         struct bnx2 *bp = netdev_priv(dev);
6986
6987         if (bp->flash_info == NULL)
6988                 return 0;
6989
6990         return (int) bp->flash_size;
6991 }
6992
6993 static int
6994 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6995                 u8 *eebuf)
6996 {
6997         struct bnx2 *bp = netdev_priv(dev);
6998         int rc;
6999
7000         if (!netif_running(dev))
7001                 return -EAGAIN;
7002
7003         /* parameters already validated in ethtool_get_eeprom */
7004
7005         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7006
7007         return rc;
7008 }
7009
7010 static int
7011 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7012                 u8 *eebuf)
7013 {
7014         struct bnx2 *bp = netdev_priv(dev);
7015         int rc;
7016
7017         if (!netif_running(dev))
7018                 return -EAGAIN;
7019
7020         /* parameters already validated in ethtool_set_eeprom */
7021
7022         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7023
7024         return rc;
7025 }
7026
/* ethtool -c: report the host-coalescing parameters cached in the
 * driver (these mirror what was programmed into the HC block).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Zero everything first; fields we do not support stay 0. */
	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
7048
7049 static int
7050 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7051 {
7052         struct bnx2 *bp = netdev_priv(dev);
7053
7054         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7055         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7056
7057         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7058         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7059
7060         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7061         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7062
7063         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7064         if (bp->rx_quick_cons_trip_int > 0xff)
7065                 bp->rx_quick_cons_trip_int = 0xff;
7066
7067         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7068         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7069
7070         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7071         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7072
7073         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7074         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7075
7076         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7077         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7078                 0xff;
7079
7080         bp->stats_ticks = coal->stats_block_coalesce_usecs;
7081         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7082                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7083                         bp->stats_ticks = USEC_PER_SEC;
7084         }
7085         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7086                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7087         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7088
7089         if (netif_running(bp->dev)) {
7090                 bnx2_netif_stop(bp, true);
7091                 bnx2_init_nic(bp, 0);
7092                 bnx2_netif_start(bp, true);
7093         }
7094
7095         return 0;
7096 }
7097
7098 static void
7099 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7100 {
7101         struct bnx2 *bp = netdev_priv(dev);
7102
7103         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7104         ering->rx_mini_max_pending = 0;
7105         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7106
7107         ering->rx_pending = bp->rx_ring_size;
7108         ering->rx_mini_pending = 0;
7109         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7110
7111         ering->tx_max_pending = MAX_TX_DESC_CNT;
7112         ering->tx_pending = bp->tx_ring_size;
7113 }
7114
/* Resize the RX/TX rings.  If the interface is up this is a full
 * stop / chip-reset / free / re-alloc / re-init cycle; on failure the
 * device is closed rather than left half-initialized.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* Re-enable NAPI first so dev_close() can run its
			 * normal teardown path.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7154
7155 static int
7156 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7157 {
7158         struct bnx2 *bp = netdev_priv(dev);
7159         int rc;
7160
7161         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7162                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7163                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7164
7165                 return -EINVAL;
7166         }
7167         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7168         return rc;
7169 }
7170
7171 static void
7172 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7173 {
7174         struct bnx2 *bp = netdev_priv(dev);
7175
7176         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7177         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7178         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7179 }
7180
7181 static int
7182 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7183 {
7184         struct bnx2 *bp = netdev_priv(dev);
7185
7186         bp->req_flow_ctrl = 0;
7187         if (epause->rx_pause)
7188                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7189         if (epause->tx_pause)
7190                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7191
7192         if (epause->autoneg) {
7193                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7194         }
7195         else {
7196                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7197         }
7198
7199         if (netif_running(dev)) {
7200                 spin_lock_bh(&bp->phy_lock);
7201                 bnx2_setup_phy(bp, bp->phy_port);
7202                 spin_unlock_bh(&bp->phy_lock);
7203         }
7204
7205         return 0;
7206 }
7207
7208 static u32
7209 bnx2_get_rx_csum(struct net_device *dev)
7210 {
7211         struct bnx2 *bp = netdev_priv(dev);
7212
7213         return bp->rx_csum;
7214 }
7215
7216 static int
7217 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7218 {
7219         struct bnx2 *bp = netdev_priv(dev);
7220
7221         bp->rx_csum = data;
7222         return 0;
7223 }
7224
7225 static int
7226 bnx2_set_tso(struct net_device *dev, u32 data)
7227 {
7228         struct bnx2 *bp = netdev_priv(dev);
7229
7230         if (data) {
7231                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7232                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7233                         dev->features |= NETIF_F_TSO6;
7234         } else
7235                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7236                                    NETIF_F_TSO_ECN);
7237         return 0;
7238 }
7239
/* Names reported for ETH_SS_STATS.  Order must match
 * bnx2_stats_offset_arr and the per-chip stats length tables below.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7291
/* Number of ethtool statistics counters.  ARRAY_SIZE (linux/kernel.h)
 * replaces the hand-rolled sizeof division.
 */
#define BNX2_NUM_STATS ARRAY_SIZE(bnx2_stats_str_arr)

/* Offset of a counter within struct statistics_block, in 32-bit words. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7296
/* 32-bit word offsets of each counter in the hardware stats block, in
 * the same order as bnx2_stats_str_arr.  For 64-bit counters the offset
 * points at the _hi word; bnx2_get_ethtool_stats() combines hi/lo.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7346
/* Per-counter widths in bytes for 5706 A0-A2 and 5708 A0 (8 = 64-bit,
 * 4 = 32-bit, 0 = counter unusable and reported as zero).
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7357
/* Per-counter widths for all other chips; only stat_IfHCInBadOctets
 * (index 1) is skipped.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7365
/* Number of self-test result slots filled by bnx2_self_test(). */
#define BNX2_NUM_TESTS 6

/* Names reported for ETH_SS_TEST; order matches buf[] in
 * bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7378
7379 static int
7380 bnx2_get_sset_count(struct net_device *dev, int sset)
7381 {
7382         switch (sset) {
7383         case ETH_SS_TEST:
7384                 return BNX2_NUM_TESTS;
7385         case ETH_SS_STATS:
7386                 return BNX2_NUM_STATS;
7387         default:
7388                 return -EOPNOTSUPP;
7389         }
7390 }
7391
/* ethtool self-test.  Offline tests (registers, memory, loopback)
 * reset the chip and interrupt traffic; online tests (nvram,
 * interrupt, link) run against the live device.  Each buf[] slot is
 * non-zero on failure, in the order of bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the chip is awake before touching it. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Quiesce traffic and put the chip into diagnostic mode. */
		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if the
		 * interface was closed in the meantime).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp, true);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Put the chip back to sleep if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7450
7451 static void
7452 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7453 {
7454         switch (stringset) {
7455         case ETH_SS_STATS:
7456                 memcpy(buf, bnx2_stats_str_arr,
7457                         sizeof(bnx2_stats_str_arr));
7458                 break;
7459         case ETH_SS_TEST:
7460                 memcpy(buf, bnx2_tests_str_arr,
7461                         sizeof(bnx2_tests_str_arr));
7462                 break;
7463         }
7464 }
7465
/* ethtool -S: dump all BNX2_NUM_STATS counters.  Each value is the sum
 * of the live hardware stats block and the copy saved across the last
 * chip reset (temp_stats_blk).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	u8 *stats_len_arr = NULL;

	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early revisions have errata counters; the per-chip length
	 * tables mark the unusable ones with 0.
	 */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		unsigned long offset;

		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}

		offset = bnx2_stats_offset_arr[i];
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64) *(hw_stats + offset) +
				 *(temp_stats + offset);
			continue;
		}
		/* 8-byte counter: offset points at the _hi word; combine
		 * hi/lo for both the live and the saved block.
		 */
		buf[i] = (((u64) *(hw_stats + offset)) << 32) +
			 *(hw_stats + offset + 1) +
			 (((u64) *(temp_stats + offset)) << 32) +
			 *(temp_stats + offset + 1);
	}
}
7512
7513 static int
7514 bnx2_phys_id(struct net_device *dev, u32 data)
7515 {
7516         struct bnx2 *bp = netdev_priv(dev);
7517         int i;
7518         u32 save;
7519
7520         bnx2_set_power_state(bp, PCI_D0);
7521
7522         if (data == 0)
7523                 data = 2;
7524
7525         save = REG_RD(bp, BNX2_MISC_CFG);
7526         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7527
7528         for (i = 0; i < (data * 2); i++) {
7529                 if ((i % 2) == 0) {
7530                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7531                 }
7532                 else {
7533                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7534                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7535                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7536                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7537                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7538                                 BNX2_EMAC_LED_TRAFFIC);
7539                 }
7540                 msleep_interruptible(500);
7541                 if (signal_pending(current))
7542                         break;
7543         }
7544         REG_WR(bp, BNX2_EMAC_LED, 0);
7545         REG_WR(bp, BNX2_MISC_CFG, save);
7546
7547         if (!netif_running(dev))
7548                 bnx2_set_power_state(bp, PCI_D3hot);
7549
7550         return 0;
7551 }
7552
7553 static int
7554 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7555 {
7556         struct bnx2 *bp = netdev_priv(dev);
7557
7558         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7559                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7560         else
7561                 return (ethtool_op_set_tx_csum(dev, data));
7562 }
7563
/* ethtool callbacks for this driver; each entry maps an ethtool
 * command to the corresponding bnx2_* handler defined above.
 */
static const struct ethtool_ops bnx2_ethtool_ops = {
        .get_settings           = bnx2_get_settings,
        .set_settings           = bnx2_set_settings,
        .get_drvinfo            = bnx2_get_drvinfo,
        .get_regs_len           = bnx2_get_regs_len,
        .get_regs               = bnx2_get_regs,
        .get_wol                = bnx2_get_wol,
        .set_wol                = bnx2_set_wol,
        .nway_reset             = bnx2_nway_reset,
        .get_link               = bnx2_get_link,
        .get_eeprom_len         = bnx2_get_eeprom_len,
        .get_eeprom             = bnx2_get_eeprom,
        .set_eeprom             = bnx2_set_eeprom,
        .get_coalesce           = bnx2_get_coalesce,
        .set_coalesce           = bnx2_set_coalesce,
        .get_ringparam          = bnx2_get_ringparam,
        .set_ringparam          = bnx2_set_ringparam,
        .get_pauseparam         = bnx2_get_pauseparam,
        .set_pauseparam         = bnx2_set_pauseparam,
        .get_rx_csum            = bnx2_get_rx_csum,
        .set_rx_csum            = bnx2_set_rx_csum,
        .set_tx_csum            = bnx2_set_tx_csum,
        .set_sg                 = ethtool_op_set_sg,
        .set_tso                = bnx2_set_tso,
        .self_test              = bnx2_self_test,
        .get_strings            = bnx2_get_strings,
        .phys_id                = bnx2_phys_id,
        .get_ethtool_stats      = bnx2_get_ethtool_stats,
        .get_sset_count         = bnx2_get_sset_count,
};
7594
7595 /* Called with rtnl_lock */
7596 static int
7597 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7598 {
7599         struct mii_ioctl_data *data = if_mii(ifr);
7600         struct bnx2 *bp = netdev_priv(dev);
7601         int err;
7602
7603         switch(cmd) {
7604         case SIOCGMIIPHY:
7605                 data->phy_id = bp->phy_addr;
7606
7607                 /* fallthru */
7608         case SIOCGMIIREG: {
7609                 u32 mii_regval;
7610
7611                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7612                         return -EOPNOTSUPP;
7613
7614                 if (!netif_running(dev))
7615                         return -EAGAIN;
7616
7617                 spin_lock_bh(&bp->phy_lock);
7618                 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7619                 spin_unlock_bh(&bp->phy_lock);
7620
7621                 data->val_out = mii_regval;
7622
7623                 return err;
7624         }
7625
7626         case SIOCSMIIREG:
7627                 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7628                         return -EOPNOTSUPP;
7629
7630                 if (!netif_running(dev))
7631                         return -EAGAIN;
7632
7633                 spin_lock_bh(&bp->phy_lock);
7634                 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7635                 spin_unlock_bh(&bp->phy_lock);
7636
7637                 return err;
7638
7639         default:
7640                 /* do nothing */
7641                 break;
7642         }
7643         return -EOPNOTSUPP;
7644 }
7645
7646 /* Called with rtnl_lock */
7647 static int
7648 bnx2_change_mac_addr(struct net_device *dev, void *p)
7649 {
7650         struct sockaddr *addr = p;
7651         struct bnx2 *bp = netdev_priv(dev);
7652
7653         if (!is_valid_ether_addr(addr->sa_data))
7654                 return -EINVAL;
7655
7656         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7657         if (netif_running(dev))
7658                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7659
7660         return 0;
7661 }
7662
7663 /* Called with rtnl_lock */
7664 static int
7665 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7666 {
7667         struct bnx2 *bp = netdev_priv(dev);
7668
7669         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7670                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7671                 return -EINVAL;
7672
7673         dev->mtu = new_mtu;
7674         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7675 }
7676
7677 #ifdef CONFIG_NET_POLL_CONTROLLER
7678 static void
7679 poll_bnx2(struct net_device *dev)
7680 {
7681         struct bnx2 *bp = netdev_priv(dev);
7682         int i;
7683
7684         for (i = 0; i < bp->irq_nvecs; i++) {
7685                 struct bnx2_irq *irq = &bp->irq_tbl[i];
7686
7687                 disable_irq(irq->vector);
7688                 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7689                 enable_irq(irq->vector);
7690         }
7691 }
7692 #endif
7693
7694 static void __devinit
7695 bnx2_get_5709_media(struct bnx2 *bp)
7696 {
7697         u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7698         u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7699         u32 strap;
7700
7701         if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7702                 return;
7703         else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7704                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7705                 return;
7706         }
7707
7708         if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7709                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7710         else
7711                 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7712
7713         if (PCI_FUNC(bp->pdev->devfn) == 0) {
7714                 switch (strap) {
7715                 case 0x4:
7716                 case 0x5:
7717                 case 0x6:
7718                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7719                         return;
7720                 }
7721         } else {
7722                 switch (strap) {
7723                 case 0x1:
7724                 case 0x2:
7725                 case 0x4:
7726                         bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7727                         return;
7728                 }
7729         }
7730 }
7731
7732 static void __devinit
7733 bnx2_get_pci_speed(struct bnx2 *bp)
7734 {
7735         u32 reg;
7736
7737         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7738         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7739                 u32 clkreg;
7740
7741                 bp->flags |= BNX2_FLAG_PCIX;
7742
7743                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7744
7745                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7746                 switch (clkreg) {
7747                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7748                         bp->bus_speed_mhz = 133;
7749                         break;
7750
7751                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7752                         bp->bus_speed_mhz = 100;
7753                         break;
7754
7755                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7756                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7757                         bp->bus_speed_mhz = 66;
7758                         break;
7759
7760                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7761                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7762                         bp->bus_speed_mhz = 50;
7763                         break;
7764
7765                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7766                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7767                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7768                         bp->bus_speed_mhz = 33;
7769                         break;
7770                 }
7771         }
7772         else {
7773                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7774                         bp->bus_speed_mhz = 66;
7775                 else
7776                         bp->bus_speed_mhz = 33;
7777         }
7778
7779         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7780                 bp->flags |= BNX2_FLAG_PCI_32BIT;
7781
7782 }
7783
/* Read the VPD area from NVRAM and, when the RO section carries the
 * expected manufacturer id ("1028"), append the vendor firmware version
 * keyword to bp->fw_version.  Silently does nothing on any error.
 */
static void __devinit
bnx2_read_vpd_fw_ver(struct bnx2 *bp)
{
        int rc, i, j;
        u8 *data;
        unsigned int block_end, rosize, len;

#define BNX2_VPD_NVRAM_OFFSET   0x300
#define BNX2_VPD_LEN            128
#define BNX2_MAX_VER_SLEN       30

        /* First BNX2_VPD_LEN bytes hold the byte-swapped copy; the raw
         * NVRAM image is read into the second half of the buffer.
         */
        data = kmalloc(256, GFP_KERNEL);
        if (!data)
                return;

        rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
                             BNX2_VPD_LEN);
        if (rc)
                goto vpd_done;

        /* NVRAM stores each 32-bit word byte-reversed; undo that. */
        for (i = 0; i < BNX2_VPD_LEN; i += 4) {
                data[i] = data[i + BNX2_VPD_LEN + 3];
                data[i + 1] = data[i + BNX2_VPD_LEN + 2];
                data[i + 2] = data[i + BNX2_VPD_LEN + 1];
                data[i + 3] = data[i + BNX2_VPD_LEN];
        }

        i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
        if (i < 0)
                goto vpd_done;

        rosize = pci_vpd_lrdt_size(&data[i]);
        i += PCI_VPD_LRDT_TAG_SIZE;
        block_end = i + rosize;

        /* Bail out if the RO section claims to extend past the buffer. */
        if (block_end > BNX2_VPD_LEN)
                goto vpd_done;

        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_MFR_ID);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        /* Only proceed for the expected 4-byte manufacturer id. */
        if (j + len > block_end || len != 4 ||
            memcmp(&data[j], "1028", 4))
                goto vpd_done;

        j = pci_vpd_find_info_keyword(data, i, rosize,
                                      PCI_VPD_RO_KEYWORD_VENDOR0);
        if (j < 0)
                goto vpd_done;

        len = pci_vpd_info_field_size(&data[j]);

        j += PCI_VPD_INFO_FLD_HDR_SIZE;
        if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
                goto vpd_done;

        /* Copy the version string and leave a space separator after it. */
        memcpy(bp->fw_version, &data[j], len);
        bp->fw_version[len] = ' ';

vpd_done:
        kfree(data);
}
7851
7852 static int __devinit
7853 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7854 {
7855         struct bnx2 *bp;
7856         unsigned long mem_len;
7857         int rc, i, j;
7858         u32 reg;
7859         u64 dma_mask, persist_dma_mask;
7860
7861         SET_NETDEV_DEV(dev, &pdev->dev);
7862         bp = netdev_priv(dev);
7863
7864         bp->flags = 0;
7865         bp->phy_flags = 0;
7866
7867         bp->temp_stats_blk =
7868                 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7869
7870         if (bp->temp_stats_blk == NULL) {
7871                 rc = -ENOMEM;
7872                 goto err_out;
7873         }
7874
7875         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7876         rc = pci_enable_device(pdev);
7877         if (rc) {
7878                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7879                 goto err_out;
7880         }
7881
7882         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7883                 dev_err(&pdev->dev,
7884                         "Cannot find PCI device base address, aborting\n");
7885                 rc = -ENODEV;
7886                 goto err_out_disable;
7887         }
7888
7889         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7890         if (rc) {
7891                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7892                 goto err_out_disable;
7893         }
7894
7895         pci_set_master(pdev);
7896         pci_save_state(pdev);
7897
7898         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7899         if (bp->pm_cap == 0) {
7900                 dev_err(&pdev->dev,
7901                         "Cannot find power management capability, aborting\n");
7902                 rc = -EIO;
7903                 goto err_out_release;
7904         }
7905
7906         bp->dev = dev;
7907         bp->pdev = pdev;
7908
7909         spin_lock_init(&bp->phy_lock);
7910         spin_lock_init(&bp->indirect_lock);
7911 #ifdef BCM_CNIC
7912         mutex_init(&bp->cnic_lock);
7913 #endif
7914         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7915
7916         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7917         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7918         dev->mem_end = dev->mem_start + mem_len;
7919         dev->irq = pdev->irq;
7920
7921         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7922
7923         if (!bp->regview) {
7924                 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7925                 rc = -ENOMEM;
7926                 goto err_out_release;
7927         }
7928
7929         /* Configure byte swap and enable write to the reg_window registers.
7930          * Rely on CPU to do target byte swapping on big endian systems
7931          * The chip's target access swapping will not swap all accesses
7932          */
7933         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7934                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7935                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7936
7937         bnx2_set_power_state(bp, PCI_D0);
7938
7939         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7940
7941         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7942                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7943                         dev_err(&pdev->dev,
7944                                 "Cannot find PCIE capability, aborting\n");
7945                         rc = -EIO;
7946                         goto err_out_unmap;
7947                 }
7948                 bp->flags |= BNX2_FLAG_PCIE;
7949                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7950                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7951         } else {
7952                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7953                 if (bp->pcix_cap == 0) {
7954                         dev_err(&pdev->dev,
7955                                 "Cannot find PCIX capability, aborting\n");
7956                         rc = -EIO;
7957                         goto err_out_unmap;
7958                 }
7959                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7960         }
7961
7962         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7963                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7964                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7965         }
7966
7967         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7968                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7969                         bp->flags |= BNX2_FLAG_MSI_CAP;
7970         }
7971
7972         /* 5708 cannot support DMA addresses > 40-bit.  */
7973         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7974                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7975         else
7976                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7977
7978         /* Configure DMA attributes. */
7979         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7980                 dev->features |= NETIF_F_HIGHDMA;
7981                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7982                 if (rc) {
7983                         dev_err(&pdev->dev,
7984                                 "pci_set_consistent_dma_mask failed, aborting\n");
7985                         goto err_out_unmap;
7986                 }
7987         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7988                 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7989                 goto err_out_unmap;
7990         }
7991
7992         if (!(bp->flags & BNX2_FLAG_PCIE))
7993                 bnx2_get_pci_speed(bp);
7994
7995         /* 5706A0 may falsely detect SERR and PERR. */
7996         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7997                 reg = REG_RD(bp, PCI_COMMAND);
7998                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7999                 REG_WR(bp, PCI_COMMAND, reg);
8000         }
8001         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8002                 !(bp->flags & BNX2_FLAG_PCIX)) {
8003
8004                 dev_err(&pdev->dev,
8005                         "5706 A1 can only be used in a PCIX bus, aborting\n");
8006                 goto err_out_unmap;
8007         }
8008
8009         bnx2_init_nvram(bp);
8010
8011         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8012
8013         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8014             BNX2_SHM_HDR_SIGNATURE_SIG) {
8015                 u32 off = PCI_FUNC(pdev->devfn) << 2;
8016
8017                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8018         } else
8019                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8020
8021         /* Get the permanent MAC address.  First we need to make sure the
8022          * firmware is actually running.
8023          */
8024         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8025
8026         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8027             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8028                 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8029                 rc = -ENODEV;
8030                 goto err_out_unmap;
8031         }
8032
8033         bnx2_read_vpd_fw_ver(bp);
8034
8035         j = strlen(bp->fw_version);
8036         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8037         for (i = 0; i < 3 && j < 24; i++) {
8038                 u8 num, k, skip0;
8039
8040                 if (i == 0) {
8041                         bp->fw_version[j++] = 'b';
8042                         bp->fw_version[j++] = 'c';
8043                         bp->fw_version[j++] = ' ';
8044                 }
8045                 num = (u8) (reg >> (24 - (i * 8)));
8046                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8047                         if (num >= k || !skip0 || k == 1) {
8048                                 bp->fw_version[j++] = (num / k) + '0';
8049                                 skip0 = 0;
8050                         }
8051                 }
8052                 if (i != 2)
8053                         bp->fw_version[j++] = '.';
8054         }
8055         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8056         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8057                 bp->wol = 1;
8058
8059         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8060                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8061
8062                 for (i = 0; i < 30; i++) {
8063                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8064                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8065                                 break;
8066                         msleep(10);
8067                 }
8068         }
8069         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8070         reg &= BNX2_CONDITION_MFW_RUN_MASK;
8071         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8072             reg != BNX2_CONDITION_MFW_RUN_NONE) {
8073                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8074
8075                 if (j < 32)
8076                         bp->fw_version[j++] = ' ';
8077                 for (i = 0; i < 3 && j < 28; i++) {
8078                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8079                         reg = swab32(reg);
8080                         memcpy(&bp->fw_version[j], &reg, 4);
8081                         j += 4;
8082                 }
8083         }
8084
8085         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8086         bp->mac_addr[0] = (u8) (reg >> 8);
8087         bp->mac_addr[1] = (u8) reg;
8088
8089         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8090         bp->mac_addr[2] = (u8) (reg >> 24);
8091         bp->mac_addr[3] = (u8) (reg >> 16);
8092         bp->mac_addr[4] = (u8) (reg >> 8);
8093         bp->mac_addr[5] = (u8) reg;
8094
8095         bp->tx_ring_size = MAX_TX_DESC_CNT;
8096         bnx2_set_rx_ring_size(bp, 255);
8097
8098         bp->rx_csum = 1;
8099
8100         bp->tx_quick_cons_trip_int = 2;
8101         bp->tx_quick_cons_trip = 20;
8102         bp->tx_ticks_int = 18;
8103         bp->tx_ticks = 80;
8104
8105         bp->rx_quick_cons_trip_int = 2;
8106         bp->rx_quick_cons_trip = 12;
8107         bp->rx_ticks_int = 18;
8108         bp->rx_ticks = 18;
8109
8110         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8111
8112         bp->current_interval = BNX2_TIMER_INTERVAL;
8113
8114         bp->phy_addr = 1;
8115
8116         /* Disable WOL support if we are running on a SERDES chip. */
8117         if (CHIP_NUM(bp) == CHIP_NUM_5709)
8118                 bnx2_get_5709_media(bp);
8119         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8120                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8121
8122         bp->phy_port = PORT_TP;
8123         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8124                 bp->phy_port = PORT_FIBRE;
8125                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8126                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8127                         bp->flags |= BNX2_FLAG_NO_WOL;
8128                         bp->wol = 0;
8129                 }
8130                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8131                         /* Don't do parallel detect on this board because of
8132                          * some board problems.  The link will not go down
8133                          * if we do parallel detect.
8134                          */
8135                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8136                             pdev->subsystem_device == 0x310c)
8137                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8138                 } else {
8139                         bp->phy_addr = 2;
8140                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8141                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8142                 }
8143         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8144                    CHIP_NUM(bp) == CHIP_NUM_5708)
8145                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8146         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8147                  (CHIP_REV(bp) == CHIP_REV_Ax ||
8148                   CHIP_REV(bp) == CHIP_REV_Bx))
8149                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8150
8151         bnx2_init_fw_cap(bp);
8152
8153         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8154             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8155             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8156             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8157                 bp->flags |= BNX2_FLAG_NO_WOL;
8158                 bp->wol = 0;
8159         }
8160
8161         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8162                 bp->tx_quick_cons_trip_int =
8163                         bp->tx_quick_cons_trip;
8164                 bp->tx_ticks_int = bp->tx_ticks;
8165                 bp->rx_quick_cons_trip_int =
8166                         bp->rx_quick_cons_trip;
8167                 bp->rx_ticks_int = bp->rx_ticks;
8168                 bp->comp_prod_trip_int = bp->comp_prod_trip;
8169                 bp->com_ticks_int = bp->com_ticks;
8170                 bp->cmd_ticks_int = bp->cmd_ticks;
8171         }
8172
8173         /* Disable MSI on 5706 if AMD 8132 bridge is found.
8174          *
8175          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
8176          * with byte enables disabled on the unused 32-bit word.  This is legal
8177          * but causes problems on the AMD 8132 which will eventually stop
8178          * responding after a while.
8179          *
8180          * AMD believes this incompatibility is unique to the 5706, and
8181          * prefers to locally disable MSI rather than globally disabling it.
8182          */
8183         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8184                 struct pci_dev *amd_8132 = NULL;
8185
8186                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8187                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
8188                                                   amd_8132))) {
8189
8190                         if (amd_8132->revision >= 0x10 &&
8191                             amd_8132->revision <= 0x13) {
8192                                 disable_msi = 1;
8193                                 pci_dev_put(amd_8132);
8194                                 break;
8195                         }
8196                 }
8197         }
8198
8199         bnx2_set_default_link(bp);
8200         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8201
8202         init_timer(&bp->timer);
8203         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8204         bp->timer.data = (unsigned long) bp;
8205         bp->timer.function = bnx2_timer;
8206
8207         return 0;
8208
8209 err_out_unmap:
8210         if (bp->regview) {
8211                 iounmap(bp->regview);
8212                 bp->regview = NULL;
8213         }
8214
8215 err_out_release:
8216         pci_release_regions(pdev);
8217
8218 err_out_disable:
8219         pci_disable_device(pdev);
8220         pci_set_drvdata(pdev, NULL);
8221
8222 err_out:
8223         return rc;
8224 }
8225
8226 static char * __devinit
8227 bnx2_bus_string(struct bnx2 *bp, char *str)
8228 {
8229         char *s = str;
8230
8231         if (bp->flags & BNX2_FLAG_PCIE) {
8232                 s += sprintf(s, "PCI Express");
8233         } else {
8234                 s += sprintf(s, "PCI");
8235                 if (bp->flags & BNX2_FLAG_PCIX)
8236                         s += sprintf(s, "-X");
8237                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8238                         s += sprintf(s, " 32-bit");
8239                 else
8240                         s += sprintf(s, " 64-bit");
8241                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8242         }
8243         return str;
8244 }
8245
8246 static void
8247 bnx2_del_napi(struct bnx2 *bp)
8248 {
8249         int i;
8250
8251         for (i = 0; i < bp->irq_nvecs; i++)
8252                 netif_napi_del(&bp->bnx2_napi[i].napi);
8253 }
8254
8255 static void
8256 bnx2_init_napi(struct bnx2 *bp)
8257 {
8258         int i;
8259
8260         for (i = 0; i < bp->irq_nvecs; i++) {
8261                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8262                 int (*poll)(struct napi_struct *, int);
8263
8264                 if (i == 0)
8265                         poll = bnx2_poll;
8266                 else
8267                         poll = bnx2_poll_msix;
8268
8269                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8270                 bnapi->bp = bp;
8271         }
8272 }
8273
/* Net device callbacks; the VLAN and netpoll entries are compiled in
 * only when the corresponding kernel options are enabled.
 */
static const struct net_device_ops bnx2_netdev_ops = {
        .ndo_open               = bnx2_open,
        .ndo_start_xmit         = bnx2_start_xmit,
        .ndo_stop               = bnx2_close,
        .ndo_get_stats          = bnx2_get_stats,
        .ndo_set_rx_mode        = bnx2_set_rx_mode,
        .ndo_do_ioctl           = bnx2_ioctl,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_set_mac_address    = bnx2_change_mac_addr,
        .ndo_change_mtu         = bnx2_change_mtu,
        .ndo_tx_timeout         = bnx2_tx_timeout,
#ifdef BCM_VLAN
        .ndo_vlan_rx_register   = bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = poll_bnx2,
#endif
};
8292
/* Propagate feature flags to dev->vlan_features when VLAN support is
 * compiled in; a no-op otherwise.
 * Note: the original declared this "static void inline"; kernel style
 * requires the inline keyword between the storage class and the type.
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8299
/* PCI probe entry point: allocate the netdev, run board init, request
 * firmware, set feature flags and register the device.  Returns 0 or a
 * negative errno; on failure everything acquired here is released.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        static int version_printed = 0;
        struct net_device *dev = NULL;
        struct bnx2 *bp;
        int rc;
        char str[40];

        /* Print the driver banner only on the first probed device. */
        if (version_printed++ == 0)
                pr_info("%s", version);

        /* dev zeroed in init_etherdev */
        dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

        if (!dev)
                return -ENOMEM;

        rc = bnx2_init_board(pdev, dev);
        if (rc < 0) {
                free_netdev(dev);
                return rc;
        }

        dev->netdev_ops = &bnx2_netdev_ops;
        dev->watchdog_timeo = TX_TIMEOUT;
        dev->ethtool_ops = &bnx2_ethtool_ops;

        bp = netdev_priv(dev);

        pci_set_drvdata(pdev, dev);

        rc = bnx2_request_firmware(bp);
        if (rc)
                goto error;

        /* MAC address was read from shared memory by bnx2_init_board(). */
        memcpy(dev->dev_addr, bp->mac_addr, 6);
        memcpy(dev->perm_addr, bp->mac_addr, 6);

        dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
        vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
        /* The 5709 additionally supports IPv6 checksum offload. */
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_IPV6_CSUM;
                vlan_features_add(dev, NETIF_F_IPV6_CSUM);
        }
#ifdef BCM_VLAN
        dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
        dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
        vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                dev->features |= NETIF_F_TSO6;
                vlan_features_add(dev, NETIF_F_TSO6);
        }
        if ((rc = register_netdev(dev))) {
                dev_err(&pdev->dev, "Cannot register net device\n");
                goto error;
        }

        netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
                    board_info[ent->driver_data].name,
                    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
                    ((CHIP_ID(bp) & 0x0ff0) >> 4),
                    bnx2_bus_string(bp, str),
                    dev->base_addr,
                    bp->pdev->irq, dev->dev_addr);

        return 0;

error:
        /* Undo everything bnx2_init_board() and this function set up. */
        if (bp->mips_firmware)
                release_firmware(bp->mips_firmware);
        if (bp->rv2p_firmware)
                release_firmware(bp->rv2p_firmware);

        if (bp->regview)
                iounmap(bp->regview);
        pci_release_regions(pdev);
        pci_disable_device(pdev);
        pci_set_drvdata(pdev, NULL);
        free_netdev(dev);
        return rc;
}
8383
/* Tear down one NX2 device: cancel deferred work, unregister from the
 * network stack, then release firmware, MMIO mapping, statistics memory
 * and the PCI resources acquired in bnx2_init_one().
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred reset work is still pending. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	kfree(bp->temp_stats_blk);

	/* free_netdev() frees bp along with dev, so it must come after
	 * all bp accesses above, but before PCI-level cleanup.
	 */
	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8409
/* Legacy PCI suspend hook: save config space, and if the interface is
 * up, quiesce the NIC, reclaim all SKBs and drop the chip into the
 * D-state chosen by the PCI core.  Always returns 0.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before touching the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8433
/* Legacy PCI resume hook: restore PCI config space and, if the
 * interface was running at suspend time, power the chip back to D0 and
 * fully re-initialize it.  Always returns 0.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): the return value of bnx2_init_nic() is ignored;
	 * a failed re-init leaves the device attached but non-functional
	 * — confirm whether that is intentional.
	 */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8450
8451 /**
8452  * bnx2_io_error_detected - called when PCI error is detected
8453  * @pdev: Pointer to PCI device
8454  * @state: The current pci connection state
8455  *
8456  * This function is called after a PCI bus error affecting
8457  * this device has been detected.
8458  */
8459 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8460                                                pci_channel_state_t state)
8461 {
8462         struct net_device *dev = pci_get_drvdata(pdev);
8463         struct bnx2 *bp = netdev_priv(dev);
8464
8465         rtnl_lock();
8466         netif_device_detach(dev);
8467
8468         if (state == pci_channel_io_perm_failure) {
8469                 rtnl_unlock();
8470                 return PCI_ERS_RESULT_DISCONNECT;
8471         }
8472
8473         if (netif_running(dev)) {
8474                 bnx2_netif_stop(bp, true);
8475                 del_timer_sync(&bp->timer);
8476                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8477         }
8478
8479         pci_disable_device(pdev);
8480         rtnl_unlock();
8481
8482         /* Request a slot slot reset. */
8483         return PCI_ERS_RESULT_NEED_RESET;
8484 }
8485
8486 /**
8487  * bnx2_io_slot_reset - called after the pci bus has been reset.
8488  * @pdev: Pointer to PCI device
8489  *
8490  * Restart the card from scratch, as if from a cold-boot.
8491  */
8492 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8493 {
8494         struct net_device *dev = pci_get_drvdata(pdev);
8495         struct bnx2 *bp = netdev_priv(dev);
8496
8497         rtnl_lock();
8498         if (pci_enable_device(pdev)) {
8499                 dev_err(&pdev->dev,
8500                         "Cannot re-enable PCI device after reset\n");
8501                 rtnl_unlock();
8502                 return PCI_ERS_RESULT_DISCONNECT;
8503         }
8504         pci_set_master(pdev);
8505         pci_restore_state(pdev);
8506         pci_save_state(pdev);
8507
8508         if (netif_running(dev)) {
8509                 bnx2_set_power_state(bp, PCI_D0);
8510                 bnx2_init_nic(bp, 1);
8511         }
8512
8513         rtnl_unlock();
8514         return PCI_ERS_RESULT_RECOVERED;
8515 }
8516
8517 /**
8518  * bnx2_io_resume - called when traffic can start flowing again.
8519  * @pdev: Pointer to PCI device
8520  *
8521  * This callback is called when the error recovery driver tells us that
8522  * its OK to resume normal operation.
8523  */
8524 static void bnx2_io_resume(struct pci_dev *pdev)
8525 {
8526         struct net_device *dev = pci_get_drvdata(pdev);
8527         struct bnx2 *bp = netdev_priv(dev);
8528
8529         rtnl_lock();
8530         if (netif_running(dev))
8531                 bnx2_netif_start(bp, true);
8532
8533         netif_device_attach(dev);
8534         rtnl_unlock();
8535 }
8536
/* PCI AER error-recovery callbacks, wired into bnx2_pci_driver below. */
static struct pci_error_handlers bnx2_err_handler = {
        .error_detected = bnx2_io_error_detected,
        .slot_reset     = bnx2_io_slot_reset,
        .resume         = bnx2_io_resume,
};
8542
/* PCI driver descriptor: matches devices in bnx2_pci_tbl and routes
 * probe/remove, legacy power management and error recovery.
 */
static struct pci_driver bnx2_pci_driver = {
        .name           = DRV_MODULE_NAME,
        .id_table       = bnx2_pci_tbl,
        .probe          = bnx2_init_one,
        .remove         = __devexit_p(bnx2_remove_one),
        .suspend        = bnx2_suspend,
        .resume         = bnx2_resume,
        .err_handler    = &bnx2_err_handler,
};
8552
8553 static int __init bnx2_init(void)
8554 {
8555         return pci_register_driver(&bnx2_pci_driver);
8556 }
8557
/* Module exit point: unregister the driver, which triggers
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
        pci_unregister_driver(&bnx2_pci_driver);
}
8562
/* Hook the driver into the module load/unload sequence. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8565
8566
8567