1 /* bnx2.c: Broadcom NX2 network driver.
3 * Copyright (c) 2004-2010 Broadcom Corporation
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
9 * Written by: Michael Chan (mchan@broadcom.com)
12 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14 #include <linux/module.h>
15 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/timer.h>
19 #include <linux/errno.h>
20 #include <linux/ioport.h>
21 #include <linux/slab.h>
22 #include <linux/vmalloc.h>
23 #include <linux/interrupt.h>
24 #include <linux/pci.h>
25 #include <linux/init.h>
26 #include <linux/netdevice.h>
27 #include <linux/etherdevice.h>
28 #include <linux/skbuff.h>
29 #include <linux/dma-mapping.h>
30 #include <linux/bitops.h>
33 #include <linux/delay.h>
34 #include <asm/byteorder.h>
36 #include <linux/time.h>
37 #include <linux/ethtool.h>
38 #include <linux/mii.h>
39 #include <linux/if_vlan.h>
40 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
45 #include <net/checksum.h>
46 #include <linux/workqueue.h>
47 #include <linux/crc32.h>
48 #include <linux/prefetch.h>
49 #include <linux/cache.h>
50 #include <linux/firmware.h>
51 #include <linux/log2.h>
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
60 #define DRV_MODULE_NAME "bnx2"
61 #define DRV_MODULE_VERSION "2.0.15"
62 #define DRV_MODULE_RELDATE "May 4, 2010"
63 #define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
64 #define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
65 #define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j15.fw"
66 #define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67 #define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
69 #define RUN_AT(x) (jiffies + (x))
71 /* Time in jiffies before concluding the transmitter is hung. */
72 #define TX_TIMEOUT (5*HZ)
74 static char version[] __devinitdata =
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
78 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
79 MODULE_LICENSE("GPL");
80 MODULE_VERSION(DRV_MODULE_VERSION);
81 MODULE_FIRMWARE(FW_MIPS_FILE_06);
82 MODULE_FIRMWARE(FW_RV2P_FILE_06);
83 MODULE_FIRMWARE(FW_MIPS_FILE_09);
84 MODULE_FIRMWARE(FW_RV2P_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87 static int disable_msi = 0;
89 module_param(disable_msi, int, 0);
90 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
106 /* indexed by board_t, above */
109 } board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 { "HP NC370T Multifunction Gigabit Server Adapter" },
112 { "HP NC370i Multifunction Gigabit Server Adapter" },
113 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 { "HP NC370F Multifunction Gigabit Server Adapter" },
115 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
117 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
118 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
119 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
120 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
123 static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
140 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
142 { PCI_VENDOR_ID_BROADCOM, 0x163b,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
144 { PCI_VENDOR_ID_BROADCOM, 0x163c,
145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
149 static const struct flash_spec flash_table[] =
151 #define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152 #define NONBUFFERED_FLAGS (BNX2_NV_WREN)
154 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
155 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
156 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
158 /* Expansion entry 0001 */
159 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
161 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
163 /* Saifun SA25F010 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
165 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 "Non-buffered flash (128kB)"},
169 /* Saifun SA25F020 (non-buffered flash) */
170 /* strap, cfg1, & write1 need updates */
171 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
173 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 "Non-buffered flash (256kB)"},
175 /* Expansion entry 0100 */
176 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
181 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
183 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
187 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
188 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190 /* Saifun SA25F005 (non-buffered flash) */
191 /* strap, cfg1, & write1 need updates */
192 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 "Non-buffered flash (64kB)"},
197 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
198 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
199 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
201 /* Expansion entry 1001 */
202 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
206 /* Expansion entry 1010 */
207 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
211 /* ATMEL AT45DB011B (buffered flash) */
212 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 "Buffered flash (128kB)"},
216 /* Expansion entry 1100 */
217 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
221 /* Expansion entry 1101 */
222 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
226 /* Ateml Expansion entry 1110 */
227 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
229 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 1110 (Atmel)"},
231 /* ATMEL AT45DB021B (buffered flash) */
232 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 "Buffered flash (256kB)"},
238 static const struct flash_spec flash_5709 = {
239 .flags = BNX2_NV_BUFFERED,
240 .page_bits = BCM5709_FLASH_PAGE_BITS,
241 .page_size = BCM5709_FLASH_PAGE_SIZE,
242 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244 .name = "5709 Buffered flash (256kB)",
247 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249 static void bnx2_init_napi(struct bnx2 *bp);
250 static void bnx2_del_napi(struct bnx2 *bp);
252 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
258 /* The ring uses 256 indices for 255 entries, one of them
259 * needs to be skipped.
261 diff = txr->tx_prod - txr->tx_cons;
262 if (unlikely(diff >= TX_DESC_CNT)) {
264 if (diff == TX_DESC_CNT)
265 diff = MAX_TX_DESC_CNT;
267 return (bp->tx_ring_size - diff);
271 bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
275 spin_lock_bh(&bp->indirect_lock);
276 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
277 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
278 spin_unlock_bh(&bp->indirect_lock);
283 bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
285 spin_lock_bh(&bp->indirect_lock);
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
287 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
288 spin_unlock_bh(&bp->indirect_lock);
292 bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
294 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
298 bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
300 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
304 bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
307 spin_lock_bh(&bp->indirect_lock);
308 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
311 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
312 REG_WR(bp, BNX2_CTX_CTX_CTRL,
313 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
314 for (i = 0; i < 5; i++) {
315 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
316 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
321 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
322 REG_WR(bp, BNX2_CTX_DATA, val);
324 spin_unlock_bh(&bp->indirect_lock);
329 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
331 struct bnx2 *bp = netdev_priv(dev);
332 struct drv_ctl_io *io = &info->data.io;
335 case DRV_CTL_IO_WR_CMD:
336 bnx2_reg_wr_ind(bp, io->offset, io->data);
338 case DRV_CTL_IO_RD_CMD:
339 io->data = bnx2_reg_rd_ind(bp, io->offset);
341 case DRV_CTL_CTX_WR_CMD:
342 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
350 static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
352 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
353 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
356 if (bp->flags & BNX2_FLAG_USING_MSIX) {
357 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
358 bnapi->cnic_present = 0;
359 sb_id = bp->irq_nvecs;
360 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
362 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
363 bnapi->cnic_tag = bnapi->last_status_idx;
364 bnapi->cnic_present = 1;
366 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
369 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
370 cp->irq_arr[0].status_blk = (void *)
371 ((unsigned long) bnapi->status_blk.msi +
372 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
373 cp->irq_arr[0].status_blk_num = sb_id;
377 static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
380 struct bnx2 *bp = netdev_priv(dev);
381 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
386 if (cp->drv_state & CNIC_DRV_STATE_REGD)
389 bp->cnic_data = data;
390 rcu_assign_pointer(bp->cnic_ops, ops);
393 cp->drv_state = CNIC_DRV_STATE_REGD;
395 bnx2_setup_cnic_irq_info(bp);
400 static int bnx2_unregister_cnic(struct net_device *dev)
402 struct bnx2 *bp = netdev_priv(dev);
403 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
404 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
406 mutex_lock(&bp->cnic_lock);
408 bnapi->cnic_present = 0;
409 rcu_assign_pointer(bp->cnic_ops, NULL);
410 mutex_unlock(&bp->cnic_lock);
415 struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
417 struct bnx2 *bp = netdev_priv(dev);
418 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
420 cp->drv_owner = THIS_MODULE;
421 cp->chip_id = bp->chip_id;
423 cp->io_base = bp->regview;
424 cp->drv_ctl = bnx2_drv_ctl;
425 cp->drv_register_cnic = bnx2_register_cnic;
426 cp->drv_unregister_cnic = bnx2_unregister_cnic;
430 EXPORT_SYMBOL(bnx2_cnic_probe);
433 bnx2_cnic_stop(struct bnx2 *bp)
435 struct cnic_ops *c_ops;
436 struct cnic_ctl_info info;
438 mutex_lock(&bp->cnic_lock);
439 c_ops = bp->cnic_ops;
441 info.cmd = CNIC_CTL_STOP_CMD;
442 c_ops->cnic_ctl(bp->cnic_data, &info);
444 mutex_unlock(&bp->cnic_lock);
448 bnx2_cnic_start(struct bnx2 *bp)
450 struct cnic_ops *c_ops;
451 struct cnic_ctl_info info;
453 mutex_lock(&bp->cnic_lock);
454 c_ops = bp->cnic_ops;
456 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
457 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
459 bnapi->cnic_tag = bnapi->last_status_idx;
461 info.cmd = CNIC_CTL_START_CMD;
462 c_ops->cnic_ctl(bp->cnic_data, &info);
464 mutex_unlock(&bp->cnic_lock);
470 bnx2_cnic_stop(struct bnx2 *bp)
475 bnx2_cnic_start(struct bnx2 *bp)
482 bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
487 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
488 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
489 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
491 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
492 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
497 val1 = (bp->phy_addr << 21) | (reg << 16) |
498 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
499 BNX2_EMAC_MDIO_COMM_START_BUSY;
500 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
502 for (i = 0; i < 50; i++) {
505 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
506 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
509 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
510 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
516 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
525 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
526 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
527 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
529 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
530 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
539 bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
544 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
545 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
546 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
548 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
549 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
554 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
555 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
556 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
557 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
559 for (i = 0; i < 50; i++) {
562 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
563 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
569 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
574 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
575 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
576 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
578 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
579 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
588 bnx2_disable_int(struct bnx2 *bp)
591 struct bnx2_napi *bnapi;
593 for (i = 0; i < bp->irq_nvecs; i++) {
594 bnapi = &bp->bnx2_napi[i];
595 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
596 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
598 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
602 bnx2_enable_int(struct bnx2 *bp)
605 struct bnx2_napi *bnapi;
607 for (i = 0; i < bp->irq_nvecs; i++) {
608 bnapi = &bp->bnx2_napi[i];
610 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
611 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
612 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
613 bnapi->last_status_idx);
615 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
616 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
617 bnapi->last_status_idx);
619 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
623 bnx2_disable_int_sync(struct bnx2 *bp)
627 atomic_inc(&bp->intr_sem);
628 if (!netif_running(bp->dev))
631 bnx2_disable_int(bp);
632 for (i = 0; i < bp->irq_nvecs; i++)
633 synchronize_irq(bp->irq_tbl[i].vector);
637 bnx2_napi_disable(struct bnx2 *bp)
641 for (i = 0; i < bp->irq_nvecs; i++)
642 napi_disable(&bp->bnx2_napi[i].napi);
646 bnx2_napi_enable(struct bnx2 *bp)
650 for (i = 0; i < bp->irq_nvecs; i++)
651 napi_enable(&bp->bnx2_napi[i].napi);
655 bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
659 if (netif_running(bp->dev)) {
660 bnx2_napi_disable(bp);
661 netif_tx_disable(bp->dev);
663 bnx2_disable_int_sync(bp);
664 netif_carrier_off(bp->dev); /* prevent tx timeout */
668 bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
670 if (atomic_dec_and_test(&bp->intr_sem)) {
671 if (netif_running(bp->dev)) {
672 netif_tx_wake_all_queues(bp->dev);
673 spin_lock_bh(&bp->phy_lock);
675 netif_carrier_on(bp->dev);
676 spin_unlock_bh(&bp->phy_lock);
677 bnx2_napi_enable(bp);
686 bnx2_free_tx_mem(struct bnx2 *bp)
690 for (i = 0; i < bp->num_tx_rings; i++) {
691 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
692 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
694 if (txr->tx_desc_ring) {
695 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
697 txr->tx_desc_mapping);
698 txr->tx_desc_ring = NULL;
700 kfree(txr->tx_buf_ring);
701 txr->tx_buf_ring = NULL;
706 bnx2_free_rx_mem(struct bnx2 *bp)
710 for (i = 0; i < bp->num_rx_rings; i++) {
711 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
712 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
715 for (j = 0; j < bp->rx_max_ring; j++) {
716 if (rxr->rx_desc_ring[j])
717 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
718 rxr->rx_desc_ring[j],
719 rxr->rx_desc_mapping[j]);
720 rxr->rx_desc_ring[j] = NULL;
722 vfree(rxr->rx_buf_ring);
723 rxr->rx_buf_ring = NULL;
725 for (j = 0; j < bp->rx_max_pg_ring; j++) {
726 if (rxr->rx_pg_desc_ring[j])
727 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
728 rxr->rx_pg_desc_ring[j],
729 rxr->rx_pg_desc_mapping[j]);
730 rxr->rx_pg_desc_ring[j] = NULL;
732 vfree(rxr->rx_pg_ring);
733 rxr->rx_pg_ring = NULL;
738 bnx2_alloc_tx_mem(struct bnx2 *bp)
742 for (i = 0; i < bp->num_tx_rings; i++) {
743 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
744 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
746 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
747 if (txr->tx_buf_ring == NULL)
751 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
752 &txr->tx_desc_mapping);
753 if (txr->tx_desc_ring == NULL)
760 bnx2_alloc_rx_mem(struct bnx2 *bp)
764 for (i = 0; i < bp->num_rx_rings; i++) {
765 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
766 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
770 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
771 if (rxr->rx_buf_ring == NULL)
774 memset(rxr->rx_buf_ring, 0,
775 SW_RXBD_RING_SIZE * bp->rx_max_ring);
777 for (j = 0; j < bp->rx_max_ring; j++) {
778 rxr->rx_desc_ring[j] =
779 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
780 &rxr->rx_desc_mapping[j]);
781 if (rxr->rx_desc_ring[j] == NULL)
786 if (bp->rx_pg_ring_size) {
787 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
789 if (rxr->rx_pg_ring == NULL)
792 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
796 for (j = 0; j < bp->rx_max_pg_ring; j++) {
797 rxr->rx_pg_desc_ring[j] =
798 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
799 &rxr->rx_pg_desc_mapping[j]);
800 if (rxr->rx_pg_desc_ring[j] == NULL)
809 bnx2_free_mem(struct bnx2 *bp)
812 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
814 bnx2_free_tx_mem(bp);
815 bnx2_free_rx_mem(bp);
817 for (i = 0; i < bp->ctx_pages; i++) {
818 if (bp->ctx_blk[i]) {
819 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
821 bp->ctx_blk_mapping[i]);
822 bp->ctx_blk[i] = NULL;
825 if (bnapi->status_blk.msi) {
826 pci_free_consistent(bp->pdev, bp->status_stats_size,
827 bnapi->status_blk.msi,
828 bp->status_blk_mapping);
829 bnapi->status_blk.msi = NULL;
830 bp->stats_blk = NULL;
835 bnx2_alloc_mem(struct bnx2 *bp)
837 int i, status_blk_size, err;
838 struct bnx2_napi *bnapi;
841 /* Combine status and statistics blocks into one allocation. */
842 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
843 if (bp->flags & BNX2_FLAG_MSIX_CAP)
844 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
845 BNX2_SBLK_MSIX_ALIGN_SIZE);
846 bp->status_stats_size = status_blk_size +
847 sizeof(struct statistics_block);
849 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
850 &bp->status_blk_mapping);
851 if (status_blk == NULL)
854 memset(status_blk, 0, bp->status_stats_size);
856 bnapi = &bp->bnx2_napi[0];
857 bnapi->status_blk.msi = status_blk;
858 bnapi->hw_tx_cons_ptr =
859 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
860 bnapi->hw_rx_cons_ptr =
861 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
862 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
863 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
864 struct status_block_msix *sblk;
866 bnapi = &bp->bnx2_napi[i];
868 sblk = (void *) (status_blk +
869 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
870 bnapi->status_blk.msix = sblk;
871 bnapi->hw_tx_cons_ptr =
872 &sblk->status_tx_quick_consumer_index;
873 bnapi->hw_rx_cons_ptr =
874 &sblk->status_rx_quick_consumer_index;
875 bnapi->int_num = i << 24;
879 bp->stats_blk = status_blk + status_blk_size;
881 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
883 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
884 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
885 if (bp->ctx_pages == 0)
887 for (i = 0; i < bp->ctx_pages; i++) {
888 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
890 &bp->ctx_blk_mapping[i]);
891 if (bp->ctx_blk[i] == NULL)
896 err = bnx2_alloc_rx_mem(bp);
900 err = bnx2_alloc_tx_mem(bp);
912 bnx2_report_fw_link(struct bnx2 *bp)
914 u32 fw_link_status = 0;
916 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
922 switch (bp->line_speed) {
924 if (bp->duplex == DUPLEX_HALF)
925 fw_link_status = BNX2_LINK_STATUS_10HALF;
927 fw_link_status = BNX2_LINK_STATUS_10FULL;
930 if (bp->duplex == DUPLEX_HALF)
931 fw_link_status = BNX2_LINK_STATUS_100HALF;
933 fw_link_status = BNX2_LINK_STATUS_100FULL;
936 if (bp->duplex == DUPLEX_HALF)
937 fw_link_status = BNX2_LINK_STATUS_1000HALF;
939 fw_link_status = BNX2_LINK_STATUS_1000FULL;
942 if (bp->duplex == DUPLEX_HALF)
943 fw_link_status = BNX2_LINK_STATUS_2500HALF;
945 fw_link_status = BNX2_LINK_STATUS_2500FULL;
949 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
952 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
954 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
955 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
957 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
958 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
959 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
961 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
965 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
967 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
971 bnx2_xceiver_str(struct bnx2 *bp)
973 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
974 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
979 bnx2_report_link(struct bnx2 *bp)
982 netif_carrier_on(bp->dev);
983 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
984 bnx2_xceiver_str(bp),
986 bp->duplex == DUPLEX_FULL ? "full" : "half");
989 if (bp->flow_ctrl & FLOW_CTRL_RX) {
990 pr_cont(", receive ");
991 if (bp->flow_ctrl & FLOW_CTRL_TX)
992 pr_cont("& transmit ");
995 pr_cont(", transmit ");
997 pr_cont("flow control ON");
1001 netif_carrier_off(bp->dev);
1002 netdev_err(bp->dev, "NIC %s Link is Down\n",
1003 bnx2_xceiver_str(bp));
1006 bnx2_report_fw_link(bp);
1010 bnx2_resolve_flow_ctrl(struct bnx2 *bp)
1012 u32 local_adv, remote_adv;
1015 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1016 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1018 if (bp->duplex == DUPLEX_FULL) {
1019 bp->flow_ctrl = bp->req_flow_ctrl;
1024 if (bp->duplex != DUPLEX_FULL) {
1028 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1029 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
1032 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1033 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
1034 bp->flow_ctrl |= FLOW_CTRL_TX;
1035 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
1036 bp->flow_ctrl |= FLOW_CTRL_RX;
1040 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1041 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1043 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1044 u32 new_local_adv = 0;
1045 u32 new_remote_adv = 0;
1047 if (local_adv & ADVERTISE_1000XPAUSE)
1048 new_local_adv |= ADVERTISE_PAUSE_CAP;
1049 if (local_adv & ADVERTISE_1000XPSE_ASYM)
1050 new_local_adv |= ADVERTISE_PAUSE_ASYM;
1051 if (remote_adv & ADVERTISE_1000XPAUSE)
1052 new_remote_adv |= ADVERTISE_PAUSE_CAP;
1053 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
1054 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
1056 local_adv = new_local_adv;
1057 remote_adv = new_remote_adv;
1060 /* See Table 28B-3 of 802.3ab-1999 spec. */
1061 if (local_adv & ADVERTISE_PAUSE_CAP) {
1062 if(local_adv & ADVERTISE_PAUSE_ASYM) {
1063 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1064 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1066 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
1067 bp->flow_ctrl = FLOW_CTRL_RX;
1071 if (remote_adv & ADVERTISE_PAUSE_CAP) {
1072 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
1076 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1077 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
1078 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
1080 bp->flow_ctrl = FLOW_CTRL_TX;
1086 bnx2_5709s_linkup(struct bnx2 *bp)
1092 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
1093 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
1094 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1096 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
1097 bp->line_speed = bp->req_line_speed;
1098 bp->duplex = bp->req_duplex;
1101 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
1103 case MII_BNX2_GP_TOP_AN_SPEED_10:
1104 bp->line_speed = SPEED_10;
1106 case MII_BNX2_GP_TOP_AN_SPEED_100:
1107 bp->line_speed = SPEED_100;
1109 case MII_BNX2_GP_TOP_AN_SPEED_1G:
1110 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
1111 bp->line_speed = SPEED_1000;
1113 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
1114 bp->line_speed = SPEED_2500;
1117 if (val & MII_BNX2_GP_TOP_AN_FD)
1118 bp->duplex = DUPLEX_FULL;
1120 bp->duplex = DUPLEX_HALF;
1125 bnx2_5708s_linkup(struct bnx2 *bp)
1130 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
1131 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
1132 case BCM5708S_1000X_STAT1_SPEED_10:
1133 bp->line_speed = SPEED_10;
1135 case BCM5708S_1000X_STAT1_SPEED_100:
1136 bp->line_speed = SPEED_100;
1138 case BCM5708S_1000X_STAT1_SPEED_1G:
1139 bp->line_speed = SPEED_1000;
1141 case BCM5708S_1000X_STAT1_SPEED_2G5:
1142 bp->line_speed = SPEED_2500;
1145 if (val & BCM5708S_1000X_STAT1_FD)
1146 bp->duplex = DUPLEX_FULL;
1148 bp->duplex = DUPLEX_HALF;
1154 bnx2_5706s_linkup(struct bnx2 *bp)
1156 u32 bmcr, local_adv, remote_adv, common;
1159 bp->line_speed = SPEED_1000;
1161 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1162 if (bmcr & BMCR_FULLDPLX) {
1163 bp->duplex = DUPLEX_FULL;
1166 bp->duplex = DUPLEX_HALF;
1169 if (!(bmcr & BMCR_ANENABLE)) {
1173 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1174 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1176 common = local_adv & remote_adv;
1177 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1179 if (common & ADVERTISE_1000XFULL) {
1180 bp->duplex = DUPLEX_FULL;
1183 bp->duplex = DUPLEX_HALF;
1191 bnx2_copper_linkup(struct bnx2 *bp)
1195 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1196 if (bmcr & BMCR_ANENABLE) {
1197 u32 local_adv, remote_adv, common;
1199 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1200 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1202 common = local_adv & (remote_adv >> 2);
1203 if (common & ADVERTISE_1000FULL) {
1204 bp->line_speed = SPEED_1000;
1205 bp->duplex = DUPLEX_FULL;
1207 else if (common & ADVERTISE_1000HALF) {
1208 bp->line_speed = SPEED_1000;
1209 bp->duplex = DUPLEX_HALF;
1212 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1213 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
1215 common = local_adv & remote_adv;
1216 if (common & ADVERTISE_100FULL) {
1217 bp->line_speed = SPEED_100;
1218 bp->duplex = DUPLEX_FULL;
1220 else if (common & ADVERTISE_100HALF) {
1221 bp->line_speed = SPEED_100;
1222 bp->duplex = DUPLEX_HALF;
1224 else if (common & ADVERTISE_10FULL) {
1225 bp->line_speed = SPEED_10;
1226 bp->duplex = DUPLEX_FULL;
1228 else if (common & ADVERTISE_10HALF) {
1229 bp->line_speed = SPEED_10;
1230 bp->duplex = DUPLEX_HALF;
1239 if (bmcr & BMCR_SPEED100) {
1240 bp->line_speed = SPEED_100;
1243 bp->line_speed = SPEED_10;
1245 if (bmcr & BMCR_FULLDPLX) {
1246 bp->duplex = DUPLEX_FULL;
1249 bp->duplex = DUPLEX_HALF;
1257 bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
1259 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
1261 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1262 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1265 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1266 u32 lo_water, hi_water;
1268 if (bp->flow_ctrl & FLOW_CTRL_TX)
1269 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1271 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1272 if (lo_water >= bp->rx_ring_size)
1275 hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);
1277 if (hi_water <= lo_water)
1280 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1281 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1285 else if (hi_water == 0)
1287 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1289 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1293 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1298 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1301 bnx2_init_rx_context(bp, cid);
1306 bnx2_set_mac_link(struct bnx2 *bp)
1310 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1311 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1312 (bp->duplex == DUPLEX_HALF)) {
1313 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1316 /* Configure the EMAC mode register. */
1317 val = REG_RD(bp, BNX2_EMAC_MODE);
1319 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1320 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1321 BNX2_EMAC_MODE_25G_MODE);
1324 switch (bp->line_speed) {
1326 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1327 val |= BNX2_EMAC_MODE_PORT_MII_10M;
1332 val |= BNX2_EMAC_MODE_PORT_MII;
1335 val |= BNX2_EMAC_MODE_25G_MODE;
1338 val |= BNX2_EMAC_MODE_PORT_GMII;
1343 val |= BNX2_EMAC_MODE_PORT_GMII;
1346 /* Set the MAC to operate in the appropriate duplex mode. */
1347 if (bp->duplex == DUPLEX_HALF)
1348 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1349 REG_WR(bp, BNX2_EMAC_MODE, val);
1351 /* Enable/disable rx PAUSE. */
1352 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1354 if (bp->flow_ctrl & FLOW_CTRL_RX)
1355 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1356 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1358 /* Enable/disable tx PAUSE. */
1359 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1360 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1362 if (bp->flow_ctrl & FLOW_CTRL_TX)
1363 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1364 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1366 /* Acknowledge the interrupt. */
1367 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1369 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1370 bnx2_init_all_rx_contexts(bp);
1374 bnx2_enable_bmsr1(struct bnx2 *bp)
1376 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1377 (CHIP_NUM(bp) == CHIP_NUM_5709))
1378 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1379 MII_BNX2_BLK_ADDR_GP_STATUS);
1383 bnx2_disable_bmsr1(struct bnx2 *bp)
1385 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1386 (CHIP_NUM(bp) == CHIP_NUM_5709))
1387 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1388 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1392 bnx2_test_and_enable_2g5(struct bnx2 *bp)
1397 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1400 if (bp->autoneg & AUTONEG_SPEED)
1401 bp->advertising |= ADVERTISED_2500baseX_Full;
1403 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1404 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1406 bnx2_read_phy(bp, bp->mii_up1, &up1);
1407 if (!(up1 & BCM5708S_UP1_2G5)) {
1408 up1 |= BCM5708S_UP1_2G5;
1409 bnx2_write_phy(bp, bp->mii_up1, up1);
1413 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1414 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1415 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1421 bnx2_test_and_disable_2g5(struct bnx2 *bp)
1426 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1429 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1430 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1432 bnx2_read_phy(bp, bp->mii_up1, &up1);
1433 if (up1 & BCM5708S_UP1_2G5) {
1434 up1 &= ~BCM5708S_UP1_2G5;
1435 bnx2_write_phy(bp, bp->mii_up1, up1);
1439 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1440 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1441 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1447 bnx2_enable_forced_2g5(struct bnx2 *bp)
1449 u32 uninitialized_var(bmcr);
1452 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1455 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1458 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1459 MII_BNX2_BLK_ADDR_SERDES_DIG);
1460 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1461 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1462 val |= MII_BNX2_SD_MISC1_FORCE |
1463 MII_BNX2_SD_MISC1_FORCE_2_5G;
1464 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1467 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1468 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1469 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1471 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1472 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1474 bmcr |= BCM5708S_BMCR_FORCE_2500;
1482 if (bp->autoneg & AUTONEG_SPEED) {
1483 bmcr &= ~BMCR_ANENABLE;
1484 if (bp->req_duplex == DUPLEX_FULL)
1485 bmcr |= BMCR_FULLDPLX;
1487 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1491 bnx2_disable_forced_2g5(struct bnx2 *bp)
1493 u32 uninitialized_var(bmcr);
1496 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1499 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1502 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1503 MII_BNX2_BLK_ADDR_SERDES_DIG);
1504 if (!bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val)) {
1505 val &= ~MII_BNX2_SD_MISC1_FORCE;
1506 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1509 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1510 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1511 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1513 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1514 err = bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1516 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1524 if (bp->autoneg & AUTONEG_SPEED)
1525 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1526 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1530 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1534 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1535 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1537 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1539 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1543 bnx2_set_link(struct bnx2 *bp)
1548 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
1553 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1556 link_up = bp->link_up;
1558 bnx2_enable_bmsr1(bp);
1559 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1560 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1561 bnx2_disable_bmsr1(bp);
1563 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1564 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
1567 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
1568 bnx2_5706s_force_link_dn(bp, 0);
1569 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
1571 val = REG_RD(bp, BNX2_EMAC_STATUS);
1573 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1574 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1575 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1577 if ((val & BNX2_EMAC_STATUS_LINK) &&
1578 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
1579 bmsr |= BMSR_LSTATUS;
1581 bmsr &= ~BMSR_LSTATUS;
1584 if (bmsr & BMSR_LSTATUS) {
1587 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1588 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1589 bnx2_5706s_linkup(bp);
1590 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1591 bnx2_5708s_linkup(bp);
1592 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1593 bnx2_5709s_linkup(bp);
1596 bnx2_copper_linkup(bp);
1598 bnx2_resolve_flow_ctrl(bp);
1601 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1602 (bp->autoneg & AUTONEG_SPEED))
1603 bnx2_disable_forced_2g5(bp);
1605 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
1608 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1609 bmcr |= BMCR_ANENABLE;
1610 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1612 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
1617 if (bp->link_up != link_up) {
1618 bnx2_report_link(bp);
1621 bnx2_set_mac_link(bp);
1627 bnx2_reset_phy(struct bnx2 *bp)
1632 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1634 #define PHY_RESET_MAX_WAIT 100
1635 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1638 bnx2_read_phy(bp, bp->mii_bmcr, ®);
1639 if (!(reg & BMCR_RESET)) {
1644 if (i == PHY_RESET_MAX_WAIT) {
1651 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1655 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1656 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1658 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1659 adv = ADVERTISE_1000XPAUSE;
1662 adv = ADVERTISE_PAUSE_CAP;
1665 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1666 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1667 adv = ADVERTISE_1000XPSE_ASYM;
1670 adv = ADVERTISE_PAUSE_ASYM;
1673 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1674 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1675 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1678 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1684 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1687 bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1688 __releases(&bp->phy_lock)
1689 __acquires(&bp->phy_lock)
1691 u32 speed_arg = 0, pause_adv;
1693 pause_adv = bnx2_phy_get_pause_adv(bp);
1695 if (bp->autoneg & AUTONEG_SPEED) {
1696 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1697 if (bp->advertising & ADVERTISED_10baseT_Half)
1698 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1699 if (bp->advertising & ADVERTISED_10baseT_Full)
1700 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1701 if (bp->advertising & ADVERTISED_100baseT_Half)
1702 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1703 if (bp->advertising & ADVERTISED_100baseT_Full)
1704 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1705 if (bp->advertising & ADVERTISED_1000baseT_Full)
1706 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1707 if (bp->advertising & ADVERTISED_2500baseX_Full)
1708 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1710 if (bp->req_line_speed == SPEED_2500)
1711 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1712 else if (bp->req_line_speed == SPEED_1000)
1713 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1714 else if (bp->req_line_speed == SPEED_100) {
1715 if (bp->req_duplex == DUPLEX_FULL)
1716 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1718 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1719 } else if (bp->req_line_speed == SPEED_10) {
1720 if (bp->req_duplex == DUPLEX_FULL)
1721 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1723 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1727 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1728 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1729 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
1730 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1732 if (port == PORT_TP)
1733 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1734 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1736 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
1738 spin_unlock_bh(&bp->phy_lock);
1739 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
1740 spin_lock_bh(&bp->phy_lock);
1746 bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
1747 __releases(&bp->phy_lock)
1748 __acquires(&bp->phy_lock)
1753 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
1754 return (bnx2_setup_remote_phy(bp, port));
1756 if (!(bp->autoneg & AUTONEG_SPEED)) {
1758 int force_link_down = 0;
1760 if (bp->req_line_speed == SPEED_2500) {
1761 if (!bnx2_test_and_enable_2g5(bp))
1762 force_link_down = 1;
1763 } else if (bp->req_line_speed == SPEED_1000) {
1764 if (bnx2_test_and_disable_2g5(bp))
1765 force_link_down = 1;
1767 bnx2_read_phy(bp, bp->mii_adv, &adv);
1768 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1770 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1771 new_bmcr = bmcr & ~BMCR_ANENABLE;
1772 new_bmcr |= BMCR_SPEED1000;
1774 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1775 if (bp->req_line_speed == SPEED_2500)
1776 bnx2_enable_forced_2g5(bp);
1777 else if (bp->req_line_speed == SPEED_1000) {
1778 bnx2_disable_forced_2g5(bp);
1779 new_bmcr &= ~0x2000;
1782 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1783 if (bp->req_line_speed == SPEED_2500)
1784 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1786 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
1789 if (bp->req_duplex == DUPLEX_FULL) {
1790 adv |= ADVERTISE_1000XFULL;
1791 new_bmcr |= BMCR_FULLDPLX;
1794 adv |= ADVERTISE_1000XHALF;
1795 new_bmcr &= ~BMCR_FULLDPLX;
1797 if ((new_bmcr != bmcr) || (force_link_down)) {
1798 /* Force a link down visible on the other side */
1800 bnx2_write_phy(bp, bp->mii_adv, adv &
1801 ~(ADVERTISE_1000XFULL |
1802 ADVERTISE_1000XHALF));
1803 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
1804 BMCR_ANRESTART | BMCR_ANENABLE);
1807 netif_carrier_off(bp->dev);
1808 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1809 bnx2_report_link(bp);
1811 bnx2_write_phy(bp, bp->mii_adv, adv);
1812 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
1814 bnx2_resolve_flow_ctrl(bp);
1815 bnx2_set_mac_link(bp);
1820 bnx2_test_and_enable_2g5(bp);
1822 if (bp->advertising & ADVERTISED_1000baseT_Full)
1823 new_adv |= ADVERTISE_1000XFULL;
1825 new_adv |= bnx2_phy_get_pause_adv(bp);
1827 bnx2_read_phy(bp, bp->mii_adv, &adv);
1828 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1830 bp->serdes_an_pending = 0;
1831 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1832 /* Force a link down visible on the other side */
1834 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
1835 spin_unlock_bh(&bp->phy_lock);
1837 spin_lock_bh(&bp->phy_lock);
1840 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1841 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
1843 /* Speed up link-up time when the link partner
1844 * does not autonegotiate which is very common
1845 * in blade servers. Some blade servers use
1846 * IPMI for kerboard input and it's important
1847 * to minimize link disruptions. Autoneg. involves
1848 * exchanging base pages plus 3 next pages and
1849 * normally completes in about 120 msec.
1851 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
1852 bp->serdes_an_pending = 1;
1853 mod_timer(&bp->timer, jiffies + bp->current_interval);
1855 bnx2_resolve_flow_ctrl(bp);
1856 bnx2_set_mac_link(bp);
1862 #define ETHTOOL_ALL_FIBRE_SPEED \
1863 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
1864 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1865 (ADVERTISED_1000baseT_Full)
1867 #define ETHTOOL_ALL_COPPER_SPEED \
1868 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1869 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1870 ADVERTISED_1000baseT_Full)
1872 #define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1873 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
1875 #define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1878 bnx2_set_default_remote_link(struct bnx2 *bp)
1882 if (bp->phy_port == PORT_TP)
1883 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1885 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1887 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1888 bp->req_line_speed = 0;
1889 bp->autoneg |= AUTONEG_SPEED;
1890 bp->advertising = ADVERTISED_Autoneg;
1891 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1892 bp->advertising |= ADVERTISED_10baseT_Half;
1893 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1894 bp->advertising |= ADVERTISED_10baseT_Full;
1895 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1896 bp->advertising |= ADVERTISED_100baseT_Half;
1897 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1898 bp->advertising |= ADVERTISED_100baseT_Full;
1899 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1900 bp->advertising |= ADVERTISED_1000baseT_Full;
1901 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1902 bp->advertising |= ADVERTISED_2500baseX_Full;
1905 bp->advertising = 0;
1906 bp->req_duplex = DUPLEX_FULL;
1907 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1908 bp->req_line_speed = SPEED_10;
1909 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1910 bp->req_duplex = DUPLEX_HALF;
1912 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1913 bp->req_line_speed = SPEED_100;
1914 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1915 bp->req_duplex = DUPLEX_HALF;
1917 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1918 bp->req_line_speed = SPEED_1000;
1919 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1920 bp->req_line_speed = SPEED_2500;
1925 bnx2_set_default_link(struct bnx2 *bp)
1927 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1928 bnx2_set_default_remote_link(bp);
1932 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1933 bp->req_line_speed = 0;
1934 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1937 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1939 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1940 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1941 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1943 bp->req_line_speed = bp->line_speed = SPEED_1000;
1944 bp->req_duplex = DUPLEX_FULL;
1947 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1951 bnx2_send_heart_beat(struct bnx2 *bp)
1956 spin_lock(&bp->indirect_lock);
1957 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1958 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1959 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1960 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1961 spin_unlock(&bp->indirect_lock);
1965 bnx2_remote_phy_event(struct bnx2 *bp)
1968 u8 link_up = bp->link_up;
1971 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
1973 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1974 bnx2_send_heart_beat(bp);
1976 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1978 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1984 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1985 bp->duplex = DUPLEX_FULL;
1987 case BNX2_LINK_STATUS_10HALF:
1988 bp->duplex = DUPLEX_HALF;
1989 case BNX2_LINK_STATUS_10FULL:
1990 bp->line_speed = SPEED_10;
1992 case BNX2_LINK_STATUS_100HALF:
1993 bp->duplex = DUPLEX_HALF;
1994 case BNX2_LINK_STATUS_100BASE_T4:
1995 case BNX2_LINK_STATUS_100FULL:
1996 bp->line_speed = SPEED_100;
1998 case BNX2_LINK_STATUS_1000HALF:
1999 bp->duplex = DUPLEX_HALF;
2000 case BNX2_LINK_STATUS_1000FULL:
2001 bp->line_speed = SPEED_1000;
2003 case BNX2_LINK_STATUS_2500HALF:
2004 bp->duplex = DUPLEX_HALF;
2005 case BNX2_LINK_STATUS_2500FULL:
2006 bp->line_speed = SPEED_2500;
2014 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
2015 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
2016 if (bp->duplex == DUPLEX_FULL)
2017 bp->flow_ctrl = bp->req_flow_ctrl;
2019 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
2020 bp->flow_ctrl |= FLOW_CTRL_TX;
2021 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
2022 bp->flow_ctrl |= FLOW_CTRL_RX;
2025 old_port = bp->phy_port;
2026 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
2027 bp->phy_port = PORT_FIBRE;
2029 bp->phy_port = PORT_TP;
2031 if (old_port != bp->phy_port)
2032 bnx2_set_default_link(bp);
2035 if (bp->link_up != link_up)
2036 bnx2_report_link(bp);
2038 bnx2_set_mac_link(bp);
2042 bnx2_set_remote_link(struct bnx2 *bp)
2046 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2048 case BNX2_FW_EVT_CODE_LINK_EVENT:
2049 bnx2_remote_phy_event(bp);
2051 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2053 bnx2_send_heart_beat(bp);
2060 bnx2_setup_copper_phy(struct bnx2 *bp)
2061 __releases(&bp->phy_lock)
2062 __acquires(&bp->phy_lock)
2067 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
2069 if (bp->autoneg & AUTONEG_SPEED) {
2070 u32 adv_reg, adv1000_reg;
2071 u32 new_adv_reg = 0;
2072 u32 new_adv1000_reg = 0;
2074 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
2075 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
2076 ADVERTISE_PAUSE_ASYM);
2078 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
2079 adv1000_reg &= PHY_ALL_1000_SPEED;
2081 if (bp->advertising & ADVERTISED_10baseT_Half)
2082 new_adv_reg |= ADVERTISE_10HALF;
2083 if (bp->advertising & ADVERTISED_10baseT_Full)
2084 new_adv_reg |= ADVERTISE_10FULL;
2085 if (bp->advertising & ADVERTISED_100baseT_Half)
2086 new_adv_reg |= ADVERTISE_100HALF;
2087 if (bp->advertising & ADVERTISED_100baseT_Full)
2088 new_adv_reg |= ADVERTISE_100FULL;
2089 if (bp->advertising & ADVERTISED_1000baseT_Full)
2090 new_adv1000_reg |= ADVERTISE_1000FULL;
2092 new_adv_reg |= ADVERTISE_CSMA;
2094 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
2096 if ((adv1000_reg != new_adv1000_reg) ||
2097 (adv_reg != new_adv_reg) ||
2098 ((bmcr & BMCR_ANENABLE) == 0)) {
2100 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
2101 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
2102 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
2105 else if (bp->link_up) {
2106 /* Flow ctrl may have changed from auto to forced */
2107 /* or vice-versa. */
2109 bnx2_resolve_flow_ctrl(bp);
2110 bnx2_set_mac_link(bp);
2116 if (bp->req_line_speed == SPEED_100) {
2117 new_bmcr |= BMCR_SPEED100;
2119 if (bp->req_duplex == DUPLEX_FULL) {
2120 new_bmcr |= BMCR_FULLDPLX;
2122 if (new_bmcr != bmcr) {
2125 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2126 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2128 if (bmsr & BMSR_LSTATUS) {
2129 /* Force link down */
2130 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
2131 spin_unlock_bh(&bp->phy_lock);
2133 spin_lock_bh(&bp->phy_lock);
2135 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2136 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
2139 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
2141 /* Normally, the new speed is setup after the link has
2142 * gone down and up again. In some cases, link will not go
2143 * down so we need to set up the new speed here.
2145 if (bmsr & BMSR_LSTATUS) {
2146 bp->line_speed = bp->req_line_speed;
2147 bp->duplex = bp->req_duplex;
2148 bnx2_resolve_flow_ctrl(bp);
2149 bnx2_set_mac_link(bp);
2152 bnx2_resolve_flow_ctrl(bp);
2153 bnx2_set_mac_link(bp);
2159 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2160 __releases(&bp->phy_lock)
2161 __acquires(&bp->phy_lock)
2163 if (bp->loopback == MAC_LOOPBACK)
2166 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2167 return (bnx2_setup_serdes_phy(bp, port));
2170 return (bnx2_setup_copper_phy(bp));
2175 bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
2179 bp->mii_bmcr = MII_BMCR + 0x10;
2180 bp->mii_bmsr = MII_BMSR + 0x10;
2181 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
2182 bp->mii_adv = MII_ADVERTISE + 0x10;
2183 bp->mii_lpa = MII_LPA + 0x10;
2184 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
2186 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
2187 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2189 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2193 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2195 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2196 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2197 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2198 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2200 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2201 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
2202 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
2203 val |= BCM5708S_UP1_2G5;
2205 val &= ~BCM5708S_UP1_2G5;
2206 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2208 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2209 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2210 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2211 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2213 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2215 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2216 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2217 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2219 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2225 bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
2232 bp->mii_up1 = BCM5708S_UP1;
2234 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2235 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2236 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2238 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2239 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2240 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2242 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2243 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2244 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2246 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
2247 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2248 val |= BCM5708S_UP1_2G5;
2249 bnx2_write_phy(bp, BCM5708S_UP1, val);
2252 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
2253 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2254 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
2255 /* increase tx signal amplitude */
2256 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2257 BCM5708S_BLK_ADDR_TX_MISC);
2258 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2259 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2260 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2261 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2264 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
2265 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2270 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
2271 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2272 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2273 BCM5708S_BLK_ADDR_TX_MISC);
2274 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2275 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2276 BCM5708S_BLK_ADDR_DIG);
2283 bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
2288 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
2290 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2291 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
2293 if (bp->dev->mtu > 1500) {
2296 /* Set extended packet length bit */
2297 bnx2_write_phy(bp, 0x18, 0x7);
2298 bnx2_read_phy(bp, 0x18, &val);
2299 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2301 bnx2_write_phy(bp, 0x1c, 0x6c00);
2302 bnx2_read_phy(bp, 0x1c, &val);
2303 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2308 bnx2_write_phy(bp, 0x18, 0x7);
2309 bnx2_read_phy(bp, 0x18, &val);
2310 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2312 bnx2_write_phy(bp, 0x1c, 0x6c00);
2313 bnx2_read_phy(bp, 0x1c, &val);
2314 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2321 bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
2328 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
2329 bnx2_write_phy(bp, 0x18, 0x0c00);
2330 bnx2_write_phy(bp, 0x17, 0x000a);
2331 bnx2_write_phy(bp, 0x15, 0x310b);
2332 bnx2_write_phy(bp, 0x17, 0x201f);
2333 bnx2_write_phy(bp, 0x15, 0x9506);
2334 bnx2_write_phy(bp, 0x17, 0x401f);
2335 bnx2_write_phy(bp, 0x15, 0x14e2);
2336 bnx2_write_phy(bp, 0x18, 0x0400);
2339 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
2340 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2341 MII_BNX2_DSP_EXPAND_REG | 0x8);
2342 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2344 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2347 if (bp->dev->mtu > 1500) {
2348 /* Set extended packet length bit */
2349 bnx2_write_phy(bp, 0x18, 0x7);
2350 bnx2_read_phy(bp, 0x18, &val);
2351 bnx2_write_phy(bp, 0x18, val | 0x4000);
2353 bnx2_read_phy(bp, 0x10, &val);
2354 bnx2_write_phy(bp, 0x10, val | 0x1);
2357 bnx2_write_phy(bp, 0x18, 0x7);
2358 bnx2_read_phy(bp, 0x18, &val);
2359 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2361 bnx2_read_phy(bp, 0x10, &val);
2362 bnx2_write_phy(bp, 0x10, val & ~0x1);
2365 /* ethernet@wirespeed */
2366 bnx2_write_phy(bp, 0x18, 0x7007);
2367 bnx2_read_phy(bp, 0x18, &val);
2368 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
2374 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2375 __releases(&bp->phy_lock)
2376 __acquires(&bp->phy_lock)
2381 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2382 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2384 bp->mii_bmcr = MII_BMCR;
2385 bp->mii_bmsr = MII_BMSR;
2386 bp->mii_bmsr1 = MII_BMSR;
2387 bp->mii_adv = MII_ADVERTISE;
2388 bp->mii_lpa = MII_LPA;
2390 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2392 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2395 bnx2_read_phy(bp, MII_PHYSID1, &val);
2396 bp->phy_id = val << 16;
2397 bnx2_read_phy(bp, MII_PHYSID2, &val);
2398 bp->phy_id |= val & 0xffff;
2400 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2401 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2402 rc = bnx2_init_5706s_phy(bp, reset_phy);
2403 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2404 rc = bnx2_init_5708s_phy(bp, reset_phy);
2405 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2406 rc = bnx2_init_5709s_phy(bp, reset_phy);
2409 rc = bnx2_init_copper_phy(bp, reset_phy);
2414 rc = bnx2_setup_phy(bp, bp->phy_port);
2420 bnx2_set_mac_loopback(struct bnx2 *bp)
2424 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2425 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2426 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2427 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2432 static int bnx2_test_link(struct bnx2 *);
2435 bnx2_set_phy_loopback(struct bnx2 *bp)
2440 spin_lock_bh(&bp->phy_lock);
2441 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2443 spin_unlock_bh(&bp->phy_lock);
2447 for (i = 0; i < 10; i++) {
2448 if (bnx2_test_link(bp) == 0)
2453 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2454 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2455 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2456 BNX2_EMAC_MODE_25G_MODE);
2458 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2459 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2465 bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
2471 msg_data |= bp->fw_wr_seq;
2473 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2478 /* wait for an acknowledgement. */
2479 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
2482 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
2484 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2487 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2490 /* If we timed out, inform the firmware that this is the case. */
2491 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2493 pr_err("fw sync timeout, reset code = %x\n", msg_data);
2495 msg_data &= ~BNX2_DRV_MSG_CODE;
2496 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2498 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
2503 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2510 bnx2_init_5709_context(struct bnx2 *bp)
2515 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2516 val |= (BCM_PAGE_BITS - 8) << 16;
2517 REG_WR(bp, BNX2_CTX_COMMAND, val);
2518 for (i = 0; i < 10; i++) {
2519 val = REG_RD(bp, BNX2_CTX_COMMAND);
2520 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2524 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2527 for (i = 0; i < bp->ctx_pages; i++) {
2531 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2535 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2536 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2537 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2538 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2539 (u64) bp->ctx_blk_mapping[i] >> 32);
2540 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2541 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2542 for (j = 0; j < 10; j++) {
2544 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2545 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2549 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2558 bnx2_init_context(struct bnx2 *bp)
2564 u32 vcid_addr, pcid_addr, offset;
2569 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2572 vcid_addr = GET_PCID_ADDR(vcid);
2574 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2579 pcid_addr = GET_PCID_ADDR(new_vcid);
2582 vcid_addr = GET_CID_ADDR(vcid);
2583 pcid_addr = vcid_addr;
2586 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2587 vcid_addr += (i << PHY_CTX_SHIFT);
2588 pcid_addr += (i << PHY_CTX_SHIFT);
2590 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
2591 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
2593 /* Zero out the context. */
2594 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
2595 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
2601 bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2607 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2608 if (good_mbuf == NULL) {
2609 pr_err("Failed to allocate memory in %s\n", __func__);
2613 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2614 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2618 /* Allocate a bunch of mbufs and save the good ones in an array. */
2619 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2620 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2621 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2622 BNX2_RBUF_COMMAND_ALLOC_REQ);
2624 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
2626 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2628 /* The addresses with Bit 9 set are bad memory blocks. */
2629 if (!(val & (1 << 9))) {
2630 good_mbuf[good_mbuf_cnt] = (u16) val;
2634 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
2637 /* Free the good ones back to the mbuf pool thus discarding
2638 * all the bad ones. */
2639 while (good_mbuf_cnt) {
2642 val = good_mbuf[good_mbuf_cnt];
2643 val = (val << 9) | val | 1;
2645 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
2652 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2656 val = (mac_addr[0] << 8) | mac_addr[1];
2658 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2660 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2661 (mac_addr[4] << 8) | mac_addr[5];
2663 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2667 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2670 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2671 struct rx_bd *rxbd =
2672 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2673 struct page *page = alloc_page(GFP_ATOMIC);
2677 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2678 PCI_DMA_FROMDEVICE);
2679 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2685 dma_unmap_addr_set(rx_pg, mapping, mapping);
2686 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2687 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2692 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2694 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2695 struct page *page = rx_pg->page;
2700 pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2701 PCI_DMA_FROMDEVICE);
2708 bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2710 struct sk_buff *skb;
2711 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
2713 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
2714 unsigned long align;
2716 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
2721 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2722 skb_reserve(skb, BNX2_RX_ALIGN - align);
2724 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2725 PCI_DMA_FROMDEVICE);
2726 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2732 rx_buf->desc = (struct l2_fhdr *) skb->data;
2733 dma_unmap_addr_set(rx_buf, mapping, mapping);
2735 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2736 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2738 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2744 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2746 struct status_block *sblk = bnapi->status_blk.msi;
2747 u32 new_link_state, old_link_state;
2750 new_link_state = sblk->status_attn_bits & event;
2751 old_link_state = sblk->status_attn_bits_ack & event;
2752 if (new_link_state != old_link_state) {
2754 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2756 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2764 bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
2766 spin_lock(&bp->phy_lock);
2768 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
2770 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
2771 bnx2_set_remote_link(bp);
2773 spin_unlock(&bp->phy_lock);
2778 bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
2782 /* Tell compiler that status block fields can change. */
2784 cons = *bnapi->hw_tx_cons_ptr;
2786 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2792 bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
2794 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
2795 u16 hw_cons, sw_cons, sw_ring_cons;
2796 int tx_pkt = 0, index;
2797 struct netdev_queue *txq;
2799 index = (bnapi - bp->bnx2_napi);
2800 txq = netdev_get_tx_queue(bp->dev, index);
2802 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2803 sw_cons = txr->tx_cons;
2805 while (sw_cons != hw_cons) {
2806 struct sw_tx_bd *tx_buf;
2807 struct sk_buff *skb;
2810 sw_ring_cons = TX_RING_IDX(sw_cons);
2812 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
2815 /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
2816 prefetch(&skb->end);
2818 /* partial BD completions possible with TSO packets */
2819 if (tx_buf->is_gso) {
2820 u16 last_idx, last_ring_idx;
2822 last_idx = sw_cons + tx_buf->nr_frags + 1;
2823 last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
2824 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2827 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2832 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
2833 skb_headlen(skb), PCI_DMA_TODEVICE);
2836 last = tx_buf->nr_frags;
2838 for (i = 0; i < last; i++) {
2839 sw_cons = NEXT_TX_BD(sw_cons);
2841 pci_unmap_page(bp->pdev,
2843 &txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
2845 skb_shinfo(skb)->frags[i].size,
2849 sw_cons = NEXT_TX_BD(sw_cons);
2853 if (tx_pkt == budget)
2856 if (hw_cons == sw_cons)
2857 hw_cons = bnx2_get_hw_tx_cons(bnapi);
2860 txr->hw_tx_cons = hw_cons;
2861 txr->tx_cons = sw_cons;
2863 /* Need to make the tx_cons update visible to bnx2_start_xmit()
2864 * before checking for netif_tx_queue_stopped(). Without the
2865 * memory barrier, there is a small possibility that bnx2_start_xmit()
2866 * will miss it and cause the queue to be stopped forever.
2870 if (unlikely(netif_tx_queue_stopped(txq)) &&
2871 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
2872 __netif_tx_lock(txq, smp_processor_id());
2873 if ((netif_tx_queue_stopped(txq)) &&
2874 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
2875 netif_tx_wake_queue(txq);
2876 __netif_tx_unlock(txq);
2883 bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2884 struct sk_buff *skb, int count)
2886 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2887 struct rx_bd *cons_bd, *prod_bd;
2890 u16 cons = rxr->rx_pg_cons;
2892 cons_rx_pg = &rxr->rx_pg_ring[cons];
2894 /* The caller was unable to allocate a new page to replace the
2895 * last one in the frags array, so we need to recycle that page
2896 * and then free the skb.
2900 struct skb_shared_info *shinfo;
2902 shinfo = skb_shinfo(skb);
2904 page = shinfo->frags[shinfo->nr_frags].page;
2905 shinfo->frags[shinfo->nr_frags].page = NULL;
2907 cons_rx_pg->page = page;
2911 hw_prod = rxr->rx_pg_prod;
2913 for (i = 0; i < count; i++) {
2914 prod = RX_PG_RING_IDX(hw_prod);
2916 prod_rx_pg = &rxr->rx_pg_ring[prod];
2917 cons_rx_pg = &rxr->rx_pg_ring[cons];
2918 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2919 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2922 prod_rx_pg->page = cons_rx_pg->page;
2923 cons_rx_pg->page = NULL;
2924 dma_unmap_addr_set(prod_rx_pg, mapping,
2925 dma_unmap_addr(cons_rx_pg, mapping));
2927 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2928 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2931 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2932 hw_prod = NEXT_RX_BD(hw_prod);
2934 rxr->rx_pg_prod = hw_prod;
2935 rxr->rx_pg_cons = cons;
2939 bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2940 struct sk_buff *skb, u16 cons, u16 prod)
2942 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2943 struct rx_bd *cons_bd, *prod_bd;
2945 cons_rx_buf = &rxr->rx_buf_ring[cons];
2946 prod_rx_buf = &rxr->rx_buf_ring[prod];
2948 pci_dma_sync_single_for_device(bp->pdev,
2949 dma_unmap_addr(cons_rx_buf, mapping),
2950 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
2952 rxr->rx_prod_bseq += bp->rx_buf_use_size;
2954 prod_rx_buf->skb = skb;
2955 prod_rx_buf->desc = (struct l2_fhdr *) skb->data;
2960 dma_unmap_addr_set(prod_rx_buf, mapping,
2961 dma_unmap_addr(cons_rx_buf, mapping));
2963 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2964 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
2965 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2966 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2970 bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
2971 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2975 u16 prod = ring_idx & 0xffff;
2977 err = bnx2_alloc_rx_skb(bp, rxr, prod);
2978 if (unlikely(err)) {
2979 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
2981 unsigned int raw_len = len + 4;
2982 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2984 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2989 skb_reserve(skb, BNX2_RX_OFFSET);
2990 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2991 PCI_DMA_FROMDEVICE);
2997 unsigned int i, frag_len, frag_size, pages;
2998 struct sw_pg *rx_pg;
2999 u16 pg_cons = rxr->rx_pg_cons;
3000 u16 pg_prod = rxr->rx_pg_prod;
3002 frag_size = len + 4 - hdr_len;
3003 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
3004 skb_put(skb, hdr_len);
3006 for (i = 0; i < pages; i++) {
3007 dma_addr_t mapping_old;
3009 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
3010 if (unlikely(frag_len <= 4)) {
3011 unsigned int tail = 4 - frag_len;
3013 rxr->rx_pg_cons = pg_cons;
3014 rxr->rx_pg_prod = pg_prod;
3015 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
3022 &skb_shinfo(skb)->frags[i - 1];
3024 skb->data_len -= tail;
3025 skb->truesize -= tail;
3029 rx_pg = &rxr->rx_pg_ring[pg_cons];
3031 /* Don't unmap yet. If we're unable to allocate a new
3032 * page, we need to recycle the page and the DMA addr.
3034 mapping_old = dma_unmap_addr(rx_pg, mapping);
3038 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
3041 err = bnx2_alloc_rx_page(bp, rxr,
3042 RX_PG_RING_IDX(pg_prod));
3043 if (unlikely(err)) {
3044 rxr->rx_pg_cons = pg_cons;
3045 rxr->rx_pg_prod = pg_prod;
3046 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
3051 pci_unmap_page(bp->pdev, mapping_old,
3052 PAGE_SIZE, PCI_DMA_FROMDEVICE);
3054 frag_size -= frag_len;
3055 skb->data_len += frag_len;
3056 skb->truesize += frag_len;
3057 skb->len += frag_len;
3059 pg_prod = NEXT_RX_BD(pg_prod);
3060 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
3062 rxr->rx_pg_prod = pg_prod;
3063 rxr->rx_pg_cons = pg_cons;
3069 bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
3073 /* Tell compiler that status block fields can change. */
3075 cons = *bnapi->hw_rx_cons_ptr;
3077 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
3083 bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
3085 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3086 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
3087 struct l2_fhdr *rx_hdr;
3088 int rx_pkt = 0, pg_ring_used = 0;
3089 struct pci_dev *pdev = bp->pdev;
3091 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3092 sw_cons = rxr->rx_cons;
3093 sw_prod = rxr->rx_prod;
3095 /* Memory barrier necessary as speculative reads of the rx
3096 * buffer can be ahead of the index in the status block
3099 while (sw_cons != hw_cons) {
3100 unsigned int len, hdr_len;
3102 struct sw_bd *rx_buf, *next_rx_buf;
3103 struct sk_buff *skb;
3104 dma_addr_t dma_addr;
3106 int hw_vlan __maybe_unused = 0;
3108 sw_ring_cons = RX_RING_IDX(sw_cons);
3109 sw_ring_prod = RX_RING_IDX(sw_prod);
3111 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
3115 if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
3118 RX_RING_IDX(NEXT_RX_BD(sw_cons))];
3119 prefetch(next_rx_buf->desc);
3123 dma_addr = dma_unmap_addr(rx_buf, mapping);
3125 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
3126 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
3127 PCI_DMA_FROMDEVICE);
3129 rx_hdr = rx_buf->desc;
3130 len = rx_hdr->l2_fhdr_pkt_len;
3131 status = rx_hdr->l2_fhdr_status;
3134 if (status & L2_FHDR_STATUS_SPLIT) {
3135 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
3137 } else if (len > bp->rx_jumbo_thresh) {
3138 hdr_len = bp->rx_jumbo_thresh;
3142 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
3143 L2_FHDR_ERRORS_PHY_DECODE |
3144 L2_FHDR_ERRORS_ALIGNMENT |
3145 L2_FHDR_ERRORS_TOO_SHORT |
3146 L2_FHDR_ERRORS_GIANT_FRAME))) {
3148 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3153 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
3155 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
3162 if (len <= bp->rx_copy_thresh) {
3163 struct sk_buff *new_skb;
3165 new_skb = netdev_alloc_skb(bp->dev, len + 6);
3166 if (new_skb == NULL) {
3167 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
3173 skb_copy_from_linear_data_offset(skb,
3175 new_skb->data, len + 6);
3176 skb_reserve(new_skb, 6);
3177 skb_put(new_skb, len);
3179 bnx2_reuse_rx_skb(bp, rxr, skb,
3180 sw_ring_cons, sw_ring_prod);
3183 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
3184 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
3187 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
3188 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
3189 vtag = rx_hdr->l2_fhdr_vlan_tag;
3196 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
3199 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3200 ve->h_vlan_proto = htons(ETH_P_8021Q);
3201 ve->h_vlan_TCI = htons(vtag);
3206 skb->protocol = eth_type_trans(skb, bp->dev);
3208 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
3209 (ntohs(skb->protocol) != 0x8100)) {
3216 skb->ip_summed = CHECKSUM_NONE;
3218 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3219 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3221 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3222 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
3223 skb->ip_summed = CHECKSUM_UNNECESSARY;
3226 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3230 vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
3233 napi_gro_receive(&bnapi->napi, skb);
3238 sw_cons = NEXT_RX_BD(sw_cons);
3239 sw_prod = NEXT_RX_BD(sw_prod);
3241 if ((rx_pkt == budget))
3244 /* Refresh hw_cons to see if there is new work */
3245 if (sw_cons == hw_cons) {
3246 hw_cons = bnx2_get_hw_rx_cons(bnapi);
3250 rxr->rx_cons = sw_cons;
3251 rxr->rx_prod = sw_prod;
3254 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
3256 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
3258 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
3266 /* MSI ISR - The only difference between this and the INTx ISR
3267 * is that the MSI interrupt is always serviced.
3270 bnx2_msi(int irq, void *dev_instance)
3272 struct bnx2_napi *bnapi = dev_instance;
3273 struct bnx2 *bp = bnapi->bp;
3275 prefetch(bnapi->status_blk.msi);
3276 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3277 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3278 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3280 /* Return here if interrupt is disabled. */
3281 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3284 napi_schedule(&bnapi->napi);
3290 bnx2_msi_1shot(int irq, void *dev_instance)
3292 struct bnx2_napi *bnapi = dev_instance;
3293 struct bnx2 *bp = bnapi->bp;
3295 prefetch(bnapi->status_blk.msi);
3297 /* Return here if interrupt is disabled. */
3298 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3301 napi_schedule(&bnapi->napi);
3307 bnx2_interrupt(int irq, void *dev_instance)
3309 struct bnx2_napi *bnapi = dev_instance;
3310 struct bnx2 *bp = bnapi->bp;
3311 struct status_block *sblk = bnapi->status_blk.msi;
3313 /* When using INTx, it is possible for the interrupt to arrive
3314 * at the CPU before the status block posted prior to the
3315 * interrupt. Reading a register will flush the status block.
3316 * When using MSI, the MSI message will always complete after
3317 * the status block write.
3319 if ((sblk->status_idx == bnapi->last_status_idx) &&
3320 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3321 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
3324 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3325 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3326 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3328 /* Read back to deassert IRQ immediately to avoid too many
3329 * spurious interrupts.
3331 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3333 /* Return here if interrupt is shared and is disabled. */
3334 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3337 if (napi_schedule_prep(&bnapi->napi)) {
3338 bnapi->last_status_idx = sblk->status_idx;
3339 __napi_schedule(&bnapi->napi);
3346 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3348 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3349 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3351 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3352 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3357 #define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3358 STATUS_ATTN_BITS_TIMER_ABORT)
3361 bnx2_has_work(struct bnx2_napi *bnapi)
3363 struct status_block *sblk = bnapi->status_blk.msi;
3365 if (bnx2_has_fast_work(bnapi))
3369 if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3373 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3374 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3381 bnx2_chk_missed_msi(struct bnx2 *bp)
3383 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3386 if (bnx2_has_work(bnapi)) {
3387 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3388 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3391 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3392 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3393 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3394 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3395 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3399 bp->idle_chk_status_idx = bnapi->last_status_idx;
3403 static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
3405 struct cnic_ops *c_ops;
3407 if (!bnapi->cnic_present)
3411 c_ops = rcu_dereference(bp->cnic_ops);
3413 bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
3414 bnapi->status_blk.msi);
3419 static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
3421 struct status_block *sblk = bnapi->status_blk.msi;
3422 u32 status_attn_bits = sblk->status_attn_bits;
3423 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
3425 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3426 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
3428 bnx2_phy_int(bp, bnapi);
3430 /* This is needed to take care of transient status
3431 * during link changes.
3433 REG_WR(bp, BNX2_HC_COMMAND,
3434 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3435 REG_RD(bp, BNX2_HC_COMMAND);
3439 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3440 int work_done, int budget)
3442 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3443 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3445 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3446 bnx2_tx_int(bp, bnapi, 0);
3448 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3449 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3454 static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3456 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3457 struct bnx2 *bp = bnapi->bp;
3459 struct status_block_msix *sblk = bnapi->status_blk.msix;
3462 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3463 if (unlikely(work_done >= budget))
3466 bnapi->last_status_idx = sblk->status_idx;
3467 /* status idx must be read before checking for more work. */
3469 if (likely(!bnx2_has_fast_work(bnapi))) {
3471 napi_complete(napi);
3472 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3473 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3474 bnapi->last_status_idx);
3481 static int bnx2_poll(struct napi_struct *napi, int budget)
3483 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3484 struct bnx2 *bp = bnapi->bp;
3486 struct status_block *sblk = bnapi->status_blk.msi;
3489 bnx2_poll_link(bp, bnapi);
3491 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3494 bnx2_poll_cnic(bp, bnapi);
3497 /* bnapi->last_status_idx is used below to tell the hw how
3498 * much work has been processed, so we must read it before
3499 * checking for more work.
3501 bnapi->last_status_idx = sblk->status_idx;
3503 if (unlikely(work_done >= budget))
3507 if (likely(!bnx2_has_work(bnapi))) {
3508 napi_complete(napi);
3509 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
3510 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3511 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3512 bnapi->last_status_idx);
3515 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3516 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3517 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
3518 bnapi->last_status_idx);
3520 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3521 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3522 bnapi->last_status_idx);
3530 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3531 * from set_multicast.
3534 bnx2_set_rx_mode(struct net_device *dev)
3536 struct bnx2 *bp = netdev_priv(dev);
3537 u32 rx_mode, sort_mode;
3538 struct netdev_hw_addr *ha;
3541 if (!netif_running(dev))
3544 spin_lock_bh(&bp->phy_lock);
3546 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3547 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3548 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3550 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
3551 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3553 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
3554 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
3556 if (dev->flags & IFF_PROMISC) {
3557 /* Promiscuous mode. */
3558 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3559 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3560 BNX2_RPM_SORT_USER0_PROM_VLAN;
3562 else if (dev->flags & IFF_ALLMULTI) {
3563 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3564 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3567 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3570 /* Accept one or more multicast(s). */
3571 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3576 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3578 netdev_for_each_mc_addr(ha, dev) {
3579 crc = ether_crc_le(ETH_ALEN, ha->addr);
3581 regidx = (bit & 0xe0) >> 5;
3583 mc_filter[regidx] |= (1 << bit);
3586 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3587 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3591 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3594 if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
3595 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3596 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3597 BNX2_RPM_SORT_USER0_PROM_VLAN;
3598 } else if (!(dev->flags & IFF_PROMISC)) {
3599 /* Add all entries into to the match filter list */
3601 netdev_for_each_uc_addr(ha, dev) {
3602 bnx2_set_mac_addr(bp, ha->addr,
3603 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3605 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3611 if (rx_mode != bp->rx_mode) {
3612 bp->rx_mode = rx_mode;
3613 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3616 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3617 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3618 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3620 spin_unlock_bh(&bp->phy_lock);
3623 static int __devinit
3624 check_fw_section(const struct firmware *fw,
3625 const struct bnx2_fw_file_section *section,
3626 u32 alignment, bool non_empty)
3628 u32 offset = be32_to_cpu(section->offset);
3629 u32 len = be32_to_cpu(section->len);
3631 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3633 if ((non_empty && len == 0) || len > fw->size - offset ||
3634 len & (alignment - 1))
3639 static int __devinit
3640 check_mips_fw_entry(const struct firmware *fw,
3641 const struct bnx2_mips_fw_file_entry *entry)
3643 if (check_fw_section(fw, &entry->text, 4, true) ||
3644 check_fw_section(fw, &entry->data, 4, false) ||
3645 check_fw_section(fw, &entry->rodata, 4, false))
3650 static int __devinit
3651 bnx2_request_firmware(struct bnx2 *bp)
3653 const char *mips_fw_file, *rv2p_fw_file;
3654 const struct bnx2_mips_fw_file *mips_fw;
3655 const struct bnx2_rv2p_fw_file *rv2p_fw;
3658 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3659 mips_fw_file = FW_MIPS_FILE_09;
3660 if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
3661 (CHIP_ID(bp) == CHIP_ID_5709_A1))
3662 rv2p_fw_file = FW_RV2P_FILE_09_Ax;
3664 rv2p_fw_file = FW_RV2P_FILE_09;
3666 mips_fw_file = FW_MIPS_FILE_06;
3667 rv2p_fw_file = FW_RV2P_FILE_06;
3670 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3672 pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
3676 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3678 pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
3681 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3682 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3683 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3684 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3685 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3686 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3687 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3688 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
3689 pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
3692 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3693 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3694 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
3695 pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
3703 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3706 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3707 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3708 rv2p_code |= RV2P_BD_PAGE_SIZE;
3715 load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
3716 const struct bnx2_rv2p_fw_file_entry *fw_entry)
3718 u32 rv2p_code_len, file_offset;
3723 rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
3724 file_offset = be32_to_cpu(fw_entry->rv2p.offset);
3726 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3728 if (rv2p_proc == RV2P_PROC1) {
3729 cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3730 addr = BNX2_RV2P_PROC1_ADDR_CMD;
3732 cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3733 addr = BNX2_RV2P_PROC2_ADDR_CMD;
3736 for (i = 0; i < rv2p_code_len; i += 8) {
3737 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
3739 REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
3742 val = (i / 8) | cmd;
3743 REG_WR(bp, addr, val);
3746 rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
3747 for (i = 0; i < 8; i++) {
3750 loc = be32_to_cpu(fw_entry->fixup[i]);
3751 if (loc && ((loc * 4) < rv2p_code_len)) {
3752 code = be32_to_cpu(*(rv2p_code + loc - 1));
3753 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
3754 code = be32_to_cpu(*(rv2p_code + loc));
3755 code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
3756 REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);
3758 val = (loc / 2) | cmd;
3759 REG_WR(bp, addr, val);
3763 /* Reset the processor, un-stall is done later. */
3764 if (rv2p_proc == RV2P_PROC1) {
3765 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3768 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3775 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3776 const struct bnx2_mips_fw_file_entry *fw_entry)
3778 u32 addr, len, file_offset;
3784 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3785 val |= cpu_reg->mode_value_halt;
3786 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3787 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3789 /* Load the Text area. */
3790 addr = be32_to_cpu(fw_entry->text.addr);
3791 len = be32_to_cpu(fw_entry->text.len);
3792 file_offset = be32_to_cpu(fw_entry->text.offset);
3793 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3795 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3799 for (j = 0; j < (len / 4); j++, offset += 4)
3800 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3803 /* Load the Data area. */
3804 addr = be32_to_cpu(fw_entry->data.addr);
3805 len = be32_to_cpu(fw_entry->data.len);
3806 file_offset = be32_to_cpu(fw_entry->data.offset);
3807 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3809 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3813 for (j = 0; j < (len / 4); j++, offset += 4)
3814 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3817 /* Load the Read-Only area. */
3818 addr = be32_to_cpu(fw_entry->rodata.addr);
3819 len = be32_to_cpu(fw_entry->rodata.len);
3820 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3821 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3823 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3827 for (j = 0; j < (len / 4); j++, offset += 4)
3828 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3831 /* Clear the pre-fetch instruction. */
3832 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3834 val = be32_to_cpu(fw_entry->start_addr);
3835 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3837 /* Start the CPU. */
3838 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3839 val &= ~cpu_reg->mode_value_halt;
3840 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3841 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3847 bnx2_init_cpus(struct bnx2 *bp)
3849 const struct bnx2_mips_fw_file *mips_fw =
3850 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3851 const struct bnx2_rv2p_fw_file *rv2p_fw =
3852 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3855 /* Initialize the RV2P processor. */
3856 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3857 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3859 /* Initialize the RX Processor. */
3860 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3864 /* Initialize the TX Processor. */
3865 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3869 /* Initialize the TX Patch-up Processor. */
3870 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3874 /* Initialize the Completion Processor. */
3875 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3879 /* Initialize the Command Processor. */
3880 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3887 bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
3891 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3897 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3898 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3899 PCI_PM_CTRL_PME_STATUS);
3901 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3902 /* delay required during transition out of D3hot */
3905 val = REG_RD(bp, BNX2_EMAC_MODE);
3906 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3907 val &= ~BNX2_EMAC_MODE_MPKT;
3908 REG_WR(bp, BNX2_EMAC_MODE, val);
3910 val = REG_RD(bp, BNX2_RPM_CONFIG);
3911 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3912 REG_WR(bp, BNX2_RPM_CONFIG, val);
3923 autoneg = bp->autoneg;
3924 advertising = bp->advertising;
3926 if (bp->phy_port == PORT_TP) {
3927 bp->autoneg = AUTONEG_SPEED;
3928 bp->advertising = ADVERTISED_10baseT_Half |
3929 ADVERTISED_10baseT_Full |
3930 ADVERTISED_100baseT_Half |
3931 ADVERTISED_100baseT_Full |
3935 spin_lock_bh(&bp->phy_lock);
3936 bnx2_setup_phy(bp, bp->phy_port);
3937 spin_unlock_bh(&bp->phy_lock);
3939 bp->autoneg = autoneg;
3940 bp->advertising = advertising;
3942 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
3944 val = REG_RD(bp, BNX2_EMAC_MODE);
3946 /* Enable port mode. */
3947 val &= ~BNX2_EMAC_MODE_PORT;
3948 val |= BNX2_EMAC_MODE_MPKT_RCVD |
3949 BNX2_EMAC_MODE_ACPI_RCVD |
3950 BNX2_EMAC_MODE_MPKT;
3951 if (bp->phy_port == PORT_TP)
3952 val |= BNX2_EMAC_MODE_PORT_MII;
3954 val |= BNX2_EMAC_MODE_PORT_GMII;
3955 if (bp->line_speed == SPEED_2500)
3956 val |= BNX2_EMAC_MODE_25G_MODE;
3959 REG_WR(bp, BNX2_EMAC_MODE, val);
3961 /* receive all multicast */
3962 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3963 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3966 REG_WR(bp, BNX2_EMAC_RX_MODE,
3967 BNX2_EMAC_RX_MODE_SORT_MODE);
3969 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3970 BNX2_RPM_SORT_USER0_MC_EN;
3971 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3972 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3973 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3974 BNX2_RPM_SORT_USER0_ENA);
3976 /* Need to enable EMAC and RPM for WOL. */
3977 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3978 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3979 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3980 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3982 val = REG_RD(bp, BNX2_RPM_CONFIG);
3983 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3984 REG_WR(bp, BNX2_RPM_CONFIG, val);
3986 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3989 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3992 if (!(bp->flags & BNX2_FLAG_NO_WOL))
3993 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3996 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3997 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3998 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
4007 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
4009 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
4012 /* No more memory access after this point until
4013 * device is brought back to D0.
4025 bnx2_acquire_nvram_lock(struct bnx2 *bp)
4030 /* Request access to the flash interface. */
4031 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4032 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4033 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4034 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4040 if (j >= NVRAM_TIMEOUT_COUNT)
4047 bnx2_release_nvram_lock(struct bnx2 *bp)
4052 /* Relinquish nvram interface. */
4053 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4055 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4056 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4057 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4063 if (j >= NVRAM_TIMEOUT_COUNT)
4071 bnx2_enable_nvram_write(struct bnx2 *bp)
4075 val = REG_RD(bp, BNX2_MISC_CFG);
4076 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4078 if (bp->flash_info->flags & BNX2_NV_WREN) {
4081 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4082 REG_WR(bp, BNX2_NVM_COMMAND,
4083 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4085 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4088 val = REG_RD(bp, BNX2_NVM_COMMAND);
4089 if (val & BNX2_NVM_COMMAND_DONE)
4093 if (j >= NVRAM_TIMEOUT_COUNT)
4100 bnx2_disable_nvram_write(struct bnx2 *bp)
4104 val = REG_RD(bp, BNX2_MISC_CFG);
4105 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4110 bnx2_enable_nvram_access(struct bnx2 *bp)
4114 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4115 /* Enable both bits, even on read. */
4116 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4117 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4121 bnx2_disable_nvram_access(struct bnx2 *bp)
4125 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4126 /* Disable both bits, even after read. */
4127 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4128 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4129 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4133 bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
4138 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
4139 /* Buffered flash, no erase needed */
4142 /* Build an erase command */
4143 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
4144 BNX2_NVM_COMMAND_DOIT;
4146 /* Need to clear DONE bit separately. */
4147 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4149 /* Address of the NVRAM to read from. */
4150 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4152 /* Issue an erase command. */
4153 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4155 /* Wait for completion. */
4156 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4161 val = REG_RD(bp, BNX2_NVM_COMMAND);
4162 if (val & BNX2_NVM_COMMAND_DONE)
4166 if (j >= NVRAM_TIMEOUT_COUNT)
4173 bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
4178 /* Build the command word. */
4179 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
4181 /* Calculate an offset of a buffered flash, not needed for 5709. */
4182 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4183 offset = ((offset / bp->flash_info->page_size) <<
4184 bp->flash_info->page_bits) +
4185 (offset % bp->flash_info->page_size);
4188 /* Need to clear DONE bit separately. */
4189 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4191 /* Address of the NVRAM to read from. */
4192 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4194 /* Issue a read command. */
4195 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4197 /* Wait for completion. */
4198 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4203 val = REG_RD(bp, BNX2_NVM_COMMAND);
4204 if (val & BNX2_NVM_COMMAND_DONE) {
4205 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
4206 memcpy(ret_val, &v, 4);
4210 if (j >= NVRAM_TIMEOUT_COUNT)
4218 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4224 /* Build the command word. */
4225 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4227 /* Calculate an offset of a buffered flash, not needed for 5709. */
4228 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4229 offset = ((offset / bp->flash_info->page_size) <<
4230 bp->flash_info->page_bits) +
4231 (offset % bp->flash_info->page_size);
4234 /* Need to clear DONE bit separately. */
4235 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4237 memcpy(&val32, val, 4);
4239 /* Write the data. */
4240 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4242 /* Address of the NVRAM to write to. */
4243 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4245 /* Issue the write command. */
4246 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4248 /* Wait for completion. */
4249 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4252 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4255 if (j >= NVRAM_TIMEOUT_COUNT)
4262 bnx2_init_nvram(struct bnx2 *bp)
4265 int j, entry_count, rc = 0;
4266 const struct flash_spec *flash;
4268 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4269 bp->flash_info = &flash_5709;
4270 goto get_flash_size;
4273 /* Determine the selected interface. */
4274 val = REG_RD(bp, BNX2_NVM_CFG1);
4276 entry_count = ARRAY_SIZE(flash_table);
4278 if (val & 0x40000000) {
4280 /* Flash interface has been reconfigured */
4281 for (j = 0, flash = &flash_table[0]; j < entry_count;
4283 if ((val & FLASH_BACKUP_STRAP_MASK) ==
4284 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
4285 bp->flash_info = flash;
4292 /* Not yet been reconfigured */
4294 if (val & (1 << 23))
4295 mask = FLASH_BACKUP_STRAP_MASK;
4297 mask = FLASH_STRAP_MASK;
4299 for (j = 0, flash = &flash_table[0]; j < entry_count;
4302 if ((val & mask) == (flash->strapping & mask)) {
4303 bp->flash_info = flash;
4305 /* Request access to the flash interface. */
4306 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4309 /* Enable access to flash interface */
4310 bnx2_enable_nvram_access(bp);
4312 /* Reconfigure the flash interface */
4313 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4314 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4315 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4316 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4318 /* Disable access to flash interface */
4319 bnx2_disable_nvram_access(bp);
4320 bnx2_release_nvram_lock(bp);
4325 } /* if (val & 0x40000000) */
4327 if (j == entry_count) {
4328 bp->flash_info = NULL;
4329 pr_alert("Unknown flash/EEPROM type\n");
4334 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
4335 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4337 bp->flash_size = val;
4339 bp->flash_size = bp->flash_info->total_size;
4345 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4349 u32 cmd_flags, offset32, len32, extra;
4354 /* Request access to the flash interface. */
4355 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4358 /* Enable access to flash interface */
4359 bnx2_enable_nvram_access(bp);
4372 pre_len = 4 - (offset & 3);
4374 if (pre_len >= len32) {
4376 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4377 BNX2_NVM_COMMAND_LAST;
4380 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4383 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4388 memcpy(ret_buf, buf + (offset & 3), pre_len);
4395 extra = 4 - (len32 & 3);
4396 len32 = (len32 + 4) & ~3;
4403 cmd_flags = BNX2_NVM_COMMAND_LAST;
4405 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4406 BNX2_NVM_COMMAND_LAST;
4408 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4410 memcpy(ret_buf, buf, 4 - extra);
4412 else if (len32 > 0) {
4415 /* Read the first word. */
4419 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4421 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4423 /* Advance to the next dword. */
4428 while (len32 > 4 && rc == 0) {
4429 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4431 /* Advance to the next dword. */
4440 cmd_flags = BNX2_NVM_COMMAND_LAST;
4441 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4443 memcpy(ret_buf, buf, 4 - extra);
4446 /* Disable access to flash interface */
4447 bnx2_disable_nvram_access(bp);
4449 bnx2_release_nvram_lock(bp);
4455 bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4458 u32 written, offset32, len32;
4459 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
4461 int align_start, align_end;
4466 align_start = align_end = 0;
4468 if ((align_start = (offset32 & 3))) {
4470 len32 += align_start;
4473 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4478 align_end = 4 - (len32 & 3);
4480 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4484 if (align_start || align_end) {
4485 align_buf = kmalloc(len32, GFP_KERNEL);
4486 if (align_buf == NULL)
4489 memcpy(align_buf, start, 4);
4492 memcpy(align_buf + len32 - 4, end, 4);
4494 memcpy(align_buf + align_start, data_buf, buf_size);
4498 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4499 flash_buffer = kmalloc(264, GFP_KERNEL);
4500 if (flash_buffer == NULL) {
4502 goto nvram_write_end;
4507 while ((written < len32) && (rc == 0)) {
4508 u32 page_start, page_end, data_start, data_end;
4509 u32 addr, cmd_flags;
4512 /* Find the page_start addr */
4513 page_start = offset32 + written;
4514 page_start -= (page_start % bp->flash_info->page_size);
4515 /* Find the page_end addr */
4516 page_end = page_start + bp->flash_info->page_size;
4517 /* Find the data_start addr */
4518 data_start = (written == 0) ? offset32 : page_start;
4519 /* Find the data_end addr */
4520 data_end = (page_end > offset32 + len32) ?
4521 (offset32 + len32) : page_end;
4523 /* Request access to the flash interface. */
4524 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4525 goto nvram_write_end;
4527 /* Enable access to flash interface */
4528 bnx2_enable_nvram_access(bp);
4530 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4531 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4534 /* Read the whole page into the buffer
4535 * (non-buffer flash only) */
4536 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4537 if (j == (bp->flash_info->page_size - 4)) {
4538 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4540 rc = bnx2_nvram_read_dword(bp,
4546 goto nvram_write_end;
4552 /* Enable writes to flash interface (unlock write-protect) */
4553 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4554 goto nvram_write_end;
4556 /* Loop to write back the buffer data from page_start to
4559 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4560 /* Erase the page */
4561 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4562 goto nvram_write_end;
4564 /* Re-enable the write again for the actual write */
4565 bnx2_enable_nvram_write(bp);
4567 for (addr = page_start; addr < data_start;
4568 addr += 4, i += 4) {
4570 rc = bnx2_nvram_write_dword(bp, addr,
4571 &flash_buffer[i], cmd_flags);
4574 goto nvram_write_end;
4580 /* Loop to write the new data from data_start to data_end */
4581 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
4582 if ((addr == page_end - 4) ||
4583 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
4584 (addr == data_end - 4))) {
4586 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4588 rc = bnx2_nvram_write_dword(bp, addr, buf,
4592 goto nvram_write_end;
4598 /* Loop to write back the buffer data from data_end
4600 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
4601 for (addr = data_end; addr < page_end;
4602 addr += 4, i += 4) {
4604 if (addr == page_end-4) {
4605 cmd_flags = BNX2_NVM_COMMAND_LAST;
4607 rc = bnx2_nvram_write_dword(bp, addr,
4608 &flash_buffer[i], cmd_flags);
4611 goto nvram_write_end;
4617 /* Disable writes to flash interface (lock write-protect) */
4618 bnx2_disable_nvram_write(bp);
4620 /* Disable access to flash interface */
4621 bnx2_disable_nvram_access(bp);
4622 bnx2_release_nvram_lock(bp);
4624 /* Increment written */
4625 written += data_end - data_start;
4629 kfree(flash_buffer);
4635 bnx2_init_fw_cap(struct bnx2 *bp)
4639 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4640 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4642 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4643 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4645 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4646 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4649 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4650 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4651 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4654 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4655 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4658 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4660 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4661 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4662 bp->phy_port = PORT_FIBRE;
4664 bp->phy_port = PORT_TP;
4666 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4667 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4670 if (netif_running(bp->dev) && sig)
4671 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4675 bnx2_setup_msix_tbl(struct bnx2 *bp)
4677 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4679 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4680 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4684 bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4690 /* Wait for the current PCI transaction to complete before
4691 * issuing a reset. */
4692 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4693 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4694 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4695 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4696 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4697 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4700 /* Wait for the firmware to tell us it is ok to issue a reset. */
4701 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
4703 /* Deposit a driver reset signature so the firmware knows that
4704 * this is a soft reset. */
4705 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4706 BNX2_DRV_RESET_SIGNATURE_MAGIC);
4708 /* Do a dummy read to force the chip to complete all current transaction
4709 * before we issue a reset. */
4710 val = REG_RD(bp, BNX2_MISC_ID);
4712 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4713 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4714 REG_RD(bp, BNX2_MISC_COMMAND);
4717 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4718 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4720 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
4723 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4724 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4725 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4728 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4730 /* Reading back any register after chip reset will hang the
4731 * bus on 5706 A0 and A1. The msleep below provides plenty
4732 * of margin for write posting.
4734 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
4735 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4738 /* Reset takes approximate 30 usec */
4739 for (i = 0; i < 10; i++) {
4740 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4741 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4742 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4747 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4748 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4749 pr_err("Chip reset did not complete\n");
4754 /* Make sure byte swapping is properly configured. */
4755 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4756 if (val != 0x01020304) {
4757 pr_err("Chip not in correct endian mode\n");
4761 /* Wait for the firmware to finish its initialization. */
4762 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
4766 spin_lock_bh(&bp->phy_lock);
4767 old_port = bp->phy_port;
4768 bnx2_init_fw_cap(bp);
4769 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4770 old_port != bp->phy_port)
4771 bnx2_set_default_remote_link(bp);
4772 spin_unlock_bh(&bp->phy_lock);
4774 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4775 /* Adjust the voltage regular to two steps lower. The default
4776 * of this register is 0x0000000e. */
4777 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4779 /* Remove bad rbuf memory from the free pool. */
4780 rc = bnx2_alloc_bad_rbuf(bp);
4783 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4784 bnx2_setup_msix_tbl(bp);
4785 /* Prevent MSIX table reads and write from timing out */
4786 REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
4787 BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
4794 bnx2_init_chip(struct bnx2 *bp)
4799 /* Make sure the interrupt is not active. */
4800 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4802 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4803 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4805 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
4807 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
4808 DMA_READ_CHANS << 12 |
4809 DMA_WRITE_CHANS << 16;
4811 val |= (0x2 << 20) | (1 << 11);
4813 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
4816 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
4817 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
4818 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4820 REG_WR(bp, BNX2_DMA_CONFIG, val);
4822 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4823 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4824 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4825 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4828 if (bp->flags & BNX2_FLAG_PCIX) {
4831 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4833 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4834 val16 & ~PCI_X_CMD_ERO);
4837 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4838 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4839 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4840 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4842 /* Initialize context mapping and zero out the quick contexts. The
4843 * context block must have already been enabled. */
4844 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4845 rc = bnx2_init_5709_context(bp);
4849 bnx2_init_context(bp);
4851 if ((rc = bnx2_init_cpus(bp)) != 0)
4854 bnx2_init_nvram(bp);
4856 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
4858 val = REG_RD(bp, BNX2_MQ_CONFIG);
4859 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4860 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
4861 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4862 val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
4863 if (CHIP_REV(bp) == CHIP_REV_Ax)
4864 val |= BNX2_MQ_CONFIG_HALT_DIS;
4867 REG_WR(bp, BNX2_MQ_CONFIG, val);
4869 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4870 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4871 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4873 val = (BCM_PAGE_BITS - 8) << 24;
4874 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4876 /* Configure page size. */
4877 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4878 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4879 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4880 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4882 val = bp->mac_addr[0] +
4883 (bp->mac_addr[1] << 8) +
4884 (bp->mac_addr[2] << 16) +
4886 (bp->mac_addr[4] << 8) +
4887 (bp->mac_addr[5] << 16);
4888 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4890 /* Program the MTU. Also include 4 bytes for CRC32. */
4892 val = mtu + ETH_HLEN + ETH_FCS_LEN;
4893 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4894 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4895 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4900 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4901 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4902 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4904 memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
4905 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4906 bp->bnx2_napi[i].last_status_idx = 0;
4908 bp->idle_chk_status_idx = 0xffff;
4910 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4912 /* Set up how to generate a link change interrupt. */
4913 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4915 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4916 (u64) bp->status_blk_mapping & 0xffffffff);
4917 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4919 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4920 (u64) bp->stats_blk_mapping & 0xffffffff);
4921 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4922 (u64) bp->stats_blk_mapping >> 32);
4924 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
4925 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4927 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4928 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4930 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4931 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4933 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4935 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4937 REG_WR(bp, BNX2_HC_COM_TICKS,
4938 (bp->com_ticks_int << 16) | bp->com_ticks);
4940 REG_WR(bp, BNX2_HC_CMD_TICKS,
4941 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4943 if (bp->flags & BNX2_FLAG_BROKEN_STATS)
4944 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4946 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
4947 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4949 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
4950 val = BNX2_HC_CONFIG_COLLECT_STATS;
4952 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4953 BNX2_HC_CONFIG_COLLECT_STATS;
4956 if (bp->flags & BNX2_FLAG_USING_MSIX) {
4957 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4958 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4960 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4963 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4964 val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;
4966 REG_WR(bp, BNX2_HC_CONFIG, val);
4968 for (i = 1; i < bp->irq_nvecs; i++) {
4969 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4970 BNX2_HC_SB_CONFIG_1;
4973 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
4974 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
4975 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4977 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
4978 (bp->tx_quick_cons_trip_int << 16) |
4979 bp->tx_quick_cons_trip);
4981 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
4982 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4984 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4985 (bp->rx_quick_cons_trip_int << 16) |
4986 bp->rx_quick_cons_trip);
4988 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4989 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4992 /* Clear internal stats counters. */
4993 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4995 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
4997 /* Initialize the receive filter. */
4998 bnx2_set_rx_mode(bp->dev);
5000 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5001 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
5002 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
5003 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
5005 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
5008 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
5009 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
5013 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
5019 bnx2_clear_ring_states(struct bnx2 *bp)
5021 struct bnx2_napi *bnapi;
5022 struct bnx2_tx_ring_info *txr;
5023 struct bnx2_rx_ring_info *rxr;
5026 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5027 bnapi = &bp->bnx2_napi[i];
5028 txr = &bnapi->tx_ring;
5029 rxr = &bnapi->rx_ring;
5032 txr->hw_tx_cons = 0;
5033 rxr->rx_prod_bseq = 0;
5036 rxr->rx_pg_prod = 0;
5037 rxr->rx_pg_cons = 0;
5042 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5044 u32 val, offset0, offset1, offset2, offset3;
5045 u32 cid_addr = GET_CID_ADDR(cid);
5047 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5048 offset0 = BNX2_L2CTX_TYPE_XI;
5049 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5050 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5051 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5053 offset0 = BNX2_L2CTX_TYPE;
5054 offset1 = BNX2_L2CTX_CMD_TYPE;
5055 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5056 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5058 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5059 bnx2_ctx_wr(bp, cid_addr, offset0, val);
5061 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5062 bnx2_ctx_wr(bp, cid_addr, offset1, val);
5064 val = (u64) txr->tx_desc_mapping >> 32;
5065 bnx2_ctx_wr(bp, cid_addr, offset2, val);
5067 val = (u64) txr->tx_desc_mapping & 0xffffffff;
5068 bnx2_ctx_wr(bp, cid_addr, offset3, val);
5072 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5076 struct bnx2_napi *bnapi;
5077 struct bnx2_tx_ring_info *txr;
5079 bnapi = &bp->bnx2_napi[ring_num];
5080 txr = &bnapi->tx_ring;
5085 cid = TX_TSS_CID + ring_num - 1;
5087 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5089 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5091 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5092 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5095 txr->tx_prod_bseq = 0;
5097 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5098 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5100 bnx2_init_tx_context(bp, cid, txr);
5104 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5110 for (i = 0; i < num_rings; i++) {
5113 rxbd = &rx_ring[i][0];
5114 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5115 rxbd->rx_bd_len = buf_size;
5116 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5118 if (i == (num_rings - 1))
5122 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5123 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5128 bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5131 u16 prod, ring_prod;
5132 u32 cid, rx_cid_addr, val;
5133 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
5134 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5139 cid = RX_RSS_CID + ring_num - 1;
5141 rx_cid_addr = GET_CID_ADDR(cid);
5143 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5144 bp->rx_buf_use_size, bp->rx_max_ring);
5146 bnx2_init_rx_context(bp, cid);
5148 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5149 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
5150 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
5153 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
5154 if (bp->rx_pg_ring_size) {
5155 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
5156 rxr->rx_pg_desc_mapping,
5157 PAGE_SIZE, bp->rx_max_pg_ring);
5158 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
5159 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
5160 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5161 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
5163 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
5164 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
5166 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
5167 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
5169 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5170 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
5173 val = (u64) rxr->rx_desc_mapping[0] >> 32;
5174 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
5176 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
5177 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
5179 ring_prod = prod = rxr->rx_pg_prod;
5180 for (i = 0; i < bp->rx_pg_ring_size; i++) {
5181 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
5182 netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
5183 ring_num, i, bp->rx_pg_ring_size);
5186 prod = NEXT_RX_BD(prod);
5187 ring_prod = RX_PG_RING_IDX(prod);
5189 rxr->rx_pg_prod = prod;
5191 ring_prod = prod = rxr->rx_prod;
5192 for (i = 0; i < bp->rx_ring_size; i++) {
5193 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
5194 netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
5195 ring_num, i, bp->rx_ring_size);
5198 prod = NEXT_RX_BD(prod);
5199 ring_prod = RX_RING_IDX(prod);
5201 rxr->rx_prod = prod;
5203 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
5204 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
5205 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
5207 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
5208 REG_WR16(bp, rxr->rx_bidx_addr, prod);
5210 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
5214 bnx2_init_all_rings(struct bnx2 *bp)
5219 bnx2_clear_ring_states(bp);
5221 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
5222 for (i = 0; i < bp->num_tx_rings; i++)
5223 bnx2_init_tx_ring(bp, i);
5225 if (bp->num_tx_rings > 1)
5226 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
5229 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
5230 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
5232 for (i = 0; i < bp->num_rx_rings; i++)
5233 bnx2_init_rx_ring(bp, i);
5235 if (bp->num_rx_rings > 1) {
5237 u8 *tbl = (u8 *) &tbl_32;
5239 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5240 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5242 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5243 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5246 BNX2_RXP_SCRATCH_RSS_TBL + i,
5247 cpu_to_be32(tbl_32));
5250 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5251 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5253 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5258 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5260 u32 max, num_rings = 1;
5262 while (ring_size > MAX_RX_DESC_CNT) {
5263 ring_size -= MAX_RX_DESC_CNT;
5266 /* round to next power of 2 */
5268 while ((max & num_rings) == 0)
5271 if (num_rings != max)
5278 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5280 u32 rx_size, rx_space, jumbo_size;
5282 /* 8 for CRC and VLAN */
5283 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5285 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5286 sizeof(struct skb_shared_info);
5288 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5289 bp->rx_pg_ring_size = 0;
5290 bp->rx_max_pg_ring = 0;
5291 bp->rx_max_pg_ring_idx = 0;
5292 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5293 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5295 jumbo_size = size * pages;
5296 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5297 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5299 bp->rx_pg_ring_size = jumbo_size;
5300 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5302 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5303 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5304 bp->rx_copy_thresh = 0;
5307 bp->rx_buf_use_size = rx_size;
5309 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5310 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5311 bp->rx_ring_size = size;
5312 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5313 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5317 bnx2_free_tx_skbs(struct bnx2 *bp)
5321 for (i = 0; i < bp->num_tx_rings; i++) {
5322 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5323 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5326 if (txr->tx_buf_ring == NULL)
5329 for (j = 0; j < TX_DESC_CNT; ) {
5330 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5331 struct sk_buff *skb = tx_buf->skb;
5339 pci_unmap_single(bp->pdev,
5340 dma_unmap_addr(tx_buf, mapping),
5346 last = tx_buf->nr_frags;
5348 for (k = 0; k < last; k++, j++) {
5349 tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
5350 pci_unmap_page(bp->pdev,
5351 dma_unmap_addr(tx_buf, mapping),
5352 skb_shinfo(skb)->frags[k].size,
5361 bnx2_free_rx_skbs(struct bnx2 *bp)
5365 for (i = 0; i < bp->num_rx_rings; i++) {
5366 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5367 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5370 if (rxr->rx_buf_ring == NULL)
5373 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5374 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5375 struct sk_buff *skb = rx_buf->skb;
5380 pci_unmap_single(bp->pdev,
5381 dma_unmap_addr(rx_buf, mapping),
5382 bp->rx_buf_use_size,
5383 PCI_DMA_FROMDEVICE);
5389 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5390 bnx2_free_rx_page(bp, rxr, j);
5395 bnx2_free_skbs(struct bnx2 *bp)
5397 bnx2_free_tx_skbs(bp);
5398 bnx2_free_rx_skbs(bp);
5402 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5406 rc = bnx2_reset_chip(bp, reset_code);
5411 if ((rc = bnx2_init_chip(bp)) != 0)
5414 bnx2_init_all_rings(bp);
5419 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5423 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5426 spin_lock_bh(&bp->phy_lock);
5427 bnx2_init_phy(bp, reset_phy);
5429 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5430 bnx2_remote_phy_event(bp);
5431 spin_unlock_bh(&bp->phy_lock);
5436 bnx2_shutdown_chip(struct bnx2 *bp)
5440 if (bp->flags & BNX2_FLAG_NO_WOL)
5441 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5443 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5445 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5447 return bnx2_reset_chip(bp, reset_code);
5451 bnx2_test_registers(struct bnx2 *bp)
5455 static const struct {
5458 #define BNX2_FL_NOT_5709 1
5462 { 0x006c, 0, 0x00000000, 0x0000003f },
5463 { 0x0090, 0, 0xffffffff, 0x00000000 },
5464 { 0x0094, 0, 0x00000000, 0x00000000 },
5466 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5467 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5468 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5469 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5470 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5471 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5472 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5473 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5474 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5476 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5477 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5478 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5479 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5480 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5481 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5483 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5484 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5485 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
5487 { 0x1000, 0, 0x00000000, 0x00000001 },
5488 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
5490 { 0x1408, 0, 0x01c00800, 0x00000000 },
5491 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5492 { 0x14a8, 0, 0x00000000, 0x000001ff },
5493 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
5494 { 0x14b0, 0, 0x00000002, 0x00000001 },
5495 { 0x14b8, 0, 0x00000000, 0x00000000 },
5496 { 0x14c0, 0, 0x00000000, 0x00000009 },
5497 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5498 { 0x14cc, 0, 0x00000000, 0x00000001 },
5499 { 0x14d0, 0, 0xffffffff, 0x00000000 },
5501 { 0x1800, 0, 0x00000000, 0x00000001 },
5502 { 0x1804, 0, 0x00000000, 0x00000003 },
5504 { 0x2800, 0, 0x00000000, 0x00000001 },
5505 { 0x2804, 0, 0x00000000, 0x00003f01 },
5506 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5507 { 0x2810, 0, 0xffff0000, 0x00000000 },
5508 { 0x2814, 0, 0xffff0000, 0x00000000 },
5509 { 0x2818, 0, 0xffff0000, 0x00000000 },
5510 { 0x281c, 0, 0xffff0000, 0x00000000 },
5511 { 0x2834, 0, 0xffffffff, 0x00000000 },
5512 { 0x2840, 0, 0x00000000, 0xffffffff },
5513 { 0x2844, 0, 0x00000000, 0xffffffff },
5514 { 0x2848, 0, 0xffffffff, 0x00000000 },
5515 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5517 { 0x2c00, 0, 0x00000000, 0x00000011 },
5518 { 0x2c04, 0, 0x00000000, 0x00030007 },
5520 { 0x3c00, 0, 0x00000000, 0x00000001 },
5521 { 0x3c04, 0, 0x00000000, 0x00070000 },
5522 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5523 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5524 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5525 { 0x3c14, 0, 0x00000000, 0xffffffff },
5526 { 0x3c18, 0, 0x00000000, 0xffffffff },
5527 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5528 { 0x3c20, 0, 0xffffff00, 0x00000000 },
5530 { 0x5004, 0, 0x00000000, 0x0000007f },
5531 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
5533 { 0x5c00, 0, 0x00000000, 0x00000001 },
5534 { 0x5c04, 0, 0x00000000, 0x0003000f },
5535 { 0x5c08, 0, 0x00000003, 0x00000000 },
5536 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5537 { 0x5c10, 0, 0x00000000, 0xffffffff },
5538 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5539 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5540 { 0x5c88, 0, 0x00000000, 0x00077373 },
5541 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5543 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5544 { 0x680c, 0, 0xffffffff, 0x00000000 },
5545 { 0x6810, 0, 0xffffffff, 0x00000000 },
5546 { 0x6814, 0, 0xffffffff, 0x00000000 },
5547 { 0x6818, 0, 0xffffffff, 0x00000000 },
5548 { 0x681c, 0, 0xffffffff, 0x00000000 },
5549 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5550 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5551 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5552 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5553 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5554 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5555 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5556 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5557 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5558 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5559 { 0x684c, 0, 0xffffffff, 0x00000000 },
5560 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5561 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5562 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5563 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5564 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5565 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5567 { 0xffff, 0, 0x00000000, 0x00000000 },
5572 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5575 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5576 u32 offset, rw_mask, ro_mask, save_val, val;
5577 u16 flags = reg_tbl[i].flags;
5579 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5582 offset = (u32) reg_tbl[i].offset;
5583 rw_mask = reg_tbl[i].rw_mask;
5584 ro_mask = reg_tbl[i].ro_mask;
5586 save_val = readl(bp->regview + offset);
5588 writel(0, bp->regview + offset);
5590 val = readl(bp->regview + offset);
5591 if ((val & rw_mask) != 0) {
5595 if ((val & ro_mask) != (save_val & ro_mask)) {
5599 writel(0xffffffff, bp->regview + offset);
5601 val = readl(bp->regview + offset);
5602 if ((val & rw_mask) != rw_mask) {
5606 if ((val & ro_mask) != (save_val & ro_mask)) {
5610 writel(save_val, bp->regview + offset);
5614 writel(save_val, bp->regview + offset);
5622 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5624 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5625 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5628 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5631 for (offset = 0; offset < size; offset += 4) {
5633 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5635 if (bnx2_reg_rd_ind(bp, start + offset) !=
5645 bnx2_test_memory(struct bnx2 *bp)
5649 static struct mem_entry {
5652 } mem_tbl_5706[] = {
5653 { 0x60000, 0x4000 },
5654 { 0xa0000, 0x3000 },
5655 { 0xe0000, 0x4000 },
5656 { 0x120000, 0x4000 },
5657 { 0x1a0000, 0x4000 },
5658 { 0x160000, 0x4000 },
5662 { 0x60000, 0x4000 },
5663 { 0xa0000, 0x3000 },
5664 { 0xe0000, 0x4000 },
5665 { 0x120000, 0x4000 },
5666 { 0x1a0000, 0x4000 },
5669 struct mem_entry *mem_tbl;
5671 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5672 mem_tbl = mem_tbl_5709;
5674 mem_tbl = mem_tbl_5706;
5676 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5677 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5678 mem_tbl[i].len)) != 0) {
5686 #define BNX2_MAC_LOOPBACK 0
5687 #define BNX2_PHY_LOOPBACK 1
5690 bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
5692 unsigned int pkt_size, num_pkts, i;
5693 struct sk_buff *skb, *rx_skb;
5694 unsigned char *packet;
5695 u16 rx_start_idx, rx_idx;
5698 struct sw_bd *rx_buf;
5699 struct l2_fhdr *rx_hdr;
5701 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
5702 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5703 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5707 txr = &tx_napi->tx_ring;
5708 rxr = &bnapi->rx_ring;
5709 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5710 bp->loopback = MAC_LOOPBACK;
5711 bnx2_set_mac_loopback(bp);
5713 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
5714 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5717 bp->loopback = PHY_LOOPBACK;
5718 bnx2_set_phy_loopback(bp);
5723 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
5724 skb = netdev_alloc_skb(bp->dev, pkt_size);
5727 packet = skb_put(skb, pkt_size);
5728 memcpy(packet, bp->dev->dev_addr, 6);
5729 memset(packet + 6, 0x0, 8);
5730 for (i = 14; i < pkt_size; i++)
5731 packet[i] = (unsigned char) (i & 0xff);
5733 map = pci_map_single(bp->pdev, skb->data, pkt_size,
5735 if (pci_dma_mapping_error(bp->pdev, map)) {
5740 REG_WR(bp, BNX2_HC_COMMAND,
5741 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5743 REG_RD(bp, BNX2_HC_COMMAND);
5746 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
5750 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
5752 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5753 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5754 txbd->tx_bd_mss_nbytes = pkt_size;
5755 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5758 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5759 txr->tx_prod_bseq += pkt_size;
5761 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5762 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
5766 REG_WR(bp, BNX2_HC_COMMAND,
5767 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5769 REG_RD(bp, BNX2_HC_COMMAND);
5773 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
5776 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
5777 goto loopback_test_done;
5779 rx_idx = bnx2_get_hw_rx_cons(bnapi);
5780 if (rx_idx != rx_start_idx + num_pkts) {
5781 goto loopback_test_done;
5784 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
5785 rx_skb = rx_buf->skb;
5787 rx_hdr = rx_buf->desc;
5788 skb_reserve(rx_skb, BNX2_RX_OFFSET);
5790 pci_dma_sync_single_for_cpu(bp->pdev,
5791 dma_unmap_addr(rx_buf, mapping),
5792 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5794 if (rx_hdr->l2_fhdr_status &
5795 (L2_FHDR_ERRORS_BAD_CRC |
5796 L2_FHDR_ERRORS_PHY_DECODE |
5797 L2_FHDR_ERRORS_ALIGNMENT |
5798 L2_FHDR_ERRORS_TOO_SHORT |
5799 L2_FHDR_ERRORS_GIANT_FRAME)) {
5801 goto loopback_test_done;
5804 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5805 goto loopback_test_done;
5808 for (i = 14; i < pkt_size; i++) {
5809 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5810 goto loopback_test_done;
5821 #define BNX2_MAC_LOOPBACK_FAILED 1
5822 #define BNX2_PHY_LOOPBACK_FAILED 2
5823 #define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5824 BNX2_PHY_LOOPBACK_FAILED)
5827 bnx2_test_loopback(struct bnx2 *bp)
5831 if (!netif_running(bp->dev))
5832 return BNX2_LOOPBACK_FAILED;
5834 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5835 spin_lock_bh(&bp->phy_lock);
5836 bnx2_init_phy(bp, 1);
5837 spin_unlock_bh(&bp->phy_lock);
5838 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5839 rc |= BNX2_MAC_LOOPBACK_FAILED;
5840 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5841 rc |= BNX2_PHY_LOOPBACK_FAILED;
5845 #define NVRAM_SIZE 0x200
5846 #define CRC32_RESIDUAL 0xdebb20e3
5849 bnx2_test_nvram(struct bnx2 *bp)
5851 __be32 buf[NVRAM_SIZE / 4];
5852 u8 *data = (u8 *) buf;
5856 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5857 goto test_nvram_done;
5859 magic = be32_to_cpu(buf[0]);
5860 if (magic != 0x669955aa) {
5862 goto test_nvram_done;
5865 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5866 goto test_nvram_done;
5868 csum = ether_crc_le(0x100, data);
5869 if (csum != CRC32_RESIDUAL) {
5871 goto test_nvram_done;
5874 csum = ether_crc_le(0x100, data + 0x100);
5875 if (csum != CRC32_RESIDUAL) {
5884 bnx2_test_link(struct bnx2 *bp)
5888 if (!netif_running(bp->dev))
5891 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
5896 spin_lock_bh(&bp->phy_lock);
5897 bnx2_enable_bmsr1(bp);
5898 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5899 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5900 bnx2_disable_bmsr1(bp);
5901 spin_unlock_bh(&bp->phy_lock);
5903 if (bmsr & BMSR_LSTATUS) {
5910 bnx2_test_intr(struct bnx2 *bp)
5915 if (!netif_running(bp->dev))
5918 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5920 /* This register is not touched during run-time. */
5921 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
5922 REG_RD(bp, BNX2_HC_COMMAND);
5924 for (i = 0; i < 10; i++) {
5925 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5931 msleep_interruptible(10);
5939 /* Determining link for parallel detection. */
5941 bnx2_5706_serdes_has_link(struct bnx2 *bp)
5943 u32 mode_ctl, an_dbg, exp;
5945 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5948 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5949 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5951 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5954 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5955 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5956 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5958 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
5961 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5962 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5963 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5965 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5972 bnx2_5706_serdes_timer(struct bnx2 *bp)
5976 spin_lock(&bp->phy_lock);
5977 if (bp->serdes_an_pending) {
5978 bp->serdes_an_pending--;
5980 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5983 bp->current_interval = BNX2_TIMER_INTERVAL;
5985 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
5987 if (bmcr & BMCR_ANENABLE) {
5988 if (bnx2_5706_serdes_has_link(bp)) {
5989 bmcr &= ~BMCR_ANENABLE;
5990 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5991 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
5992 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
5996 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
5997 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
6000 bnx2_write_phy(bp, 0x17, 0x0f01);
6001 bnx2_read_phy(bp, 0x15, &phy2);
6005 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6006 bmcr |= BMCR_ANENABLE;
6007 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
6009 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
6012 bp->current_interval = BNX2_TIMER_INTERVAL;
6017 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
6018 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6019 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
6021 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
6022 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
6023 bnx2_5706s_force_link_dn(bp, 1);
6024 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
6027 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
6030 spin_unlock(&bp->phy_lock);
6034 bnx2_5708_serdes_timer(struct bnx2 *bp)
6036 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
6039 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
6040 bp->serdes_an_pending = 0;
6044 spin_lock(&bp->phy_lock);
6045 if (bp->serdes_an_pending)
6046 bp->serdes_an_pending--;
6047 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
6050 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6051 if (bmcr & BMCR_ANENABLE) {
6052 bnx2_enable_forced_2g5(bp);
6053 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
6055 bnx2_disable_forced_2g5(bp);
6056 bp->serdes_an_pending = 2;
6057 bp->current_interval = BNX2_TIMER_INTERVAL;
6061 bp->current_interval = BNX2_TIMER_INTERVAL;
6063 spin_unlock(&bp->phy_lock);
6067 bnx2_timer(unsigned long data)
6069 struct bnx2 *bp = (struct bnx2 *) data;
6071 if (!netif_running(bp->dev))
6074 if (atomic_read(&bp->intr_sem) != 0)
6075 goto bnx2_restart_timer;
6077 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
6078 BNX2_FLAG_USING_MSI)
6079 bnx2_chk_missed_msi(bp);
6081 bnx2_send_heart_beat(bp);
6083 bp->stats_blk->stat_FwRxDrop =
6084 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
6086 /* workaround occasional corrupted counters */
6087 if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
6088 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
6089 BNX2_HC_COMMAND_STATS_NOW);
6091 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6092 if (CHIP_NUM(bp) == CHIP_NUM_5706)
6093 bnx2_5706_serdes_timer(bp);
6095 bnx2_5708_serdes_timer(bp);
6099 mod_timer(&bp->timer, jiffies + bp->current_interval);
6103 bnx2_request_irq(struct bnx2 *bp)
6105 unsigned long flags;
6106 struct bnx2_irq *irq;
6109 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6112 flags = IRQF_SHARED;
6114 for (i = 0; i < bp->irq_nvecs; i++) {
6115 irq = &bp->irq_tbl[i];
6116 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6126 bnx2_free_irq(struct bnx2 *bp)
6128 struct bnx2_irq *irq;
6131 for (i = 0; i < bp->irq_nvecs; i++) {
6132 irq = &bp->irq_tbl[i];
6134 free_irq(irq->vector, &bp->bnx2_napi[i]);
6137 if (bp->flags & BNX2_FLAG_USING_MSI)
6138 pci_disable_msi(bp->pdev);
6139 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6140 pci_disable_msix(bp->pdev);
6142 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6146 bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
6149 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
6150 struct net_device *dev = bp->dev;
6151 const int len = sizeof(bp->irq_tbl[0].name);
6153 bnx2_setup_msix_tbl(bp);
6154 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
6155 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
6156 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
6158 /* Need to flush the previous three writes to ensure MSI-X
6159 * is setup properly */
6160 REG_RD(bp, BNX2_PCI_MSIX_CONTROL);
6162 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6163 msix_ent[i].entry = i;
6164 msix_ent[i].vector = 0;
6167 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
6171 bp->irq_nvecs = msix_vecs;
6172 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
6173 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
6174 bp->irq_tbl[i].vector = msix_ent[i].vector;
6175 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
6176 bp->irq_tbl[i].handler = bnx2_msi_1shot;
6181 bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
6183 int cpus = num_online_cpus();
6184 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
6186 bp->irq_tbl[0].handler = bnx2_interrupt;
6187 strcpy(bp->irq_tbl[0].name, bp->dev->name);
6189 bp->irq_tbl[0].vector = bp->pdev->irq;
6191 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
6192 bnx2_enable_msix(bp, msix_vecs);
6194 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
6195 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6196 if (pci_enable_msi(bp->pdev) == 0) {
6197 bp->flags |= BNX2_FLAG_USING_MSI;
6198 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6199 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6200 bp->irq_tbl[0].handler = bnx2_msi_1shot;
6202 bp->irq_tbl[0].handler = bnx2_msi;
6204 bp->irq_tbl[0].vector = bp->pdev->irq;
6208 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
6209 bp->dev->real_num_tx_queues = bp->num_tx_rings;
6211 bp->num_rx_rings = bp->irq_nvecs;
6214 /* Called with rtnl_lock */
6216 bnx2_open(struct net_device *dev)
6218 struct bnx2 *bp = netdev_priv(dev);
6221 netif_carrier_off(dev);
6223 bnx2_set_power_state(bp, PCI_D0);
6224 bnx2_disable_int(bp);
6226 bnx2_setup_int_mode(bp, disable_msi);
6228 bnx2_napi_enable(bp);
6229 rc = bnx2_alloc_mem(bp);
6233 rc = bnx2_request_irq(bp);
6237 rc = bnx2_init_nic(bp, 1);
6241 mod_timer(&bp->timer, jiffies + bp->current_interval);
6243 atomic_set(&bp->intr_sem, 0);
6245 memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));
6247 bnx2_enable_int(bp);
6249 if (bp->flags & BNX2_FLAG_USING_MSI) {
6250 /* Test MSI to make sure it is working
6251 * If MSI test fails, go back to INTx mode
6253 if (bnx2_test_intr(bp) != 0) {
6254 netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");
6256 bnx2_disable_int(bp);
6259 bnx2_setup_int_mode(bp, 1);
6261 rc = bnx2_init_nic(bp, 0);
6264 rc = bnx2_request_irq(bp);
6267 del_timer_sync(&bp->timer);
6270 bnx2_enable_int(bp);
6273 if (bp->flags & BNX2_FLAG_USING_MSI)
6274 netdev_info(dev, "using MSI\n");
6275 else if (bp->flags & BNX2_FLAG_USING_MSIX)
6276 netdev_info(dev, "using MSIX\n");
6278 netif_tx_start_all_queues(dev);
6283 bnx2_napi_disable(bp);
6292 bnx2_reset_task(struct work_struct *work)
6294 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
6297 if (!netif_running(bp->dev)) {
6302 bnx2_netif_stop(bp, true);
6304 bnx2_init_nic(bp, 1);
6306 atomic_set(&bp->intr_sem, 1);
6307 bnx2_netif_start(bp, true);
6312 bnx2_dump_state(struct bnx2 *bp)
6314 struct net_device *dev = bp->dev;
6317 netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
6318 netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] EMAC_RX_STATUS[%08x]\n",
6319 REG_RD(bp, BNX2_EMAC_TX_STATUS),
6320 REG_RD(bp, BNX2_EMAC_RX_STATUS));
6321 netdev_err(dev, "DEBUG: RPM_MGMT_PKT_CTRL[%08x]\n",
6322 REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
6323 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6324 mcp_p0 = BNX2_MCP_STATE_P0;
6325 mcp_p1 = BNX2_MCP_STATE_P1;
6327 mcp_p0 = BNX2_MCP_STATE_P0_5708;
6328 mcp_p1 = BNX2_MCP_STATE_P1_5708;
6330 netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
6331 bnx2_reg_rd_ind(bp, mcp_p0), bnx2_reg_rd_ind(bp, mcp_p1));
6332 netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
6333 REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
6334 if (bp->flags & BNX2_FLAG_USING_MSIX)
6335 netdev_err(dev, "DEBUG: PBA[%08x]\n",
6336 REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
6340 bnx2_tx_timeout(struct net_device *dev)
6342 struct bnx2 *bp = netdev_priv(dev);
6344 bnx2_dump_state(bp);
6346 /* This allows the netif to be shutdown gracefully before resetting */
6347 schedule_work(&bp->reset_task);
6351 /* Called with rtnl_lock */
6353 bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
6355 struct bnx2 *bp = netdev_priv(dev);
6357 if (netif_running(dev))
6358 bnx2_netif_stop(bp, false);
6362 if (!netif_running(dev))
6365 bnx2_set_rx_mode(dev);
6366 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
6367 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);
6369 bnx2_netif_start(bp, false);
6373 /* Called with netif_tx_lock.
6374 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6375 * netif_wake_queue().
6378 bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6380 struct bnx2 *bp = netdev_priv(dev);
6383 struct sw_tx_bd *tx_buf;
6384 u32 len, vlan_tag_flags, last_frag, mss;
6385 u16 prod, ring_prod;
6387 struct bnx2_napi *bnapi;
6388 struct bnx2_tx_ring_info *txr;
6389 struct netdev_queue *txq;
6391 /* Determine which tx ring we will be placed on */
6392 i = skb_get_queue_mapping(skb);
6393 bnapi = &bp->bnx2_napi[i];
6394 txr = &bnapi->tx_ring;
6395 txq = netdev_get_tx_queue(dev, i);
6397 if (unlikely(bnx2_tx_avail(bp, txr) <
6398 (skb_shinfo(skb)->nr_frags + 1))) {
6399 netif_tx_stop_queue(txq);
6400 netdev_err(dev, "BUG! Tx ring full when queue awake!\n");
6402 return NETDEV_TX_BUSY;
6404 len = skb_headlen(skb);
6405 prod = txr->tx_prod;
6406 ring_prod = TX_RING_IDX(prod);
6409 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6410 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6414 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
6416 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6419 if ((mss = skb_shinfo(skb)->gso_size)) {
6423 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6425 tcp_opt_len = tcp_optlen(skb);
6427 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6428 u32 tcp_off = skb_transport_offset(skb) -
6429 sizeof(struct ipv6hdr) - ETH_HLEN;
6431 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6432 TX_BD_FLAGS_SW_FLAGS;
6433 if (likely(tcp_off == 0))
6434 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6437 vlan_tag_flags |= ((tcp_off & 0x3) <<
6438 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6439 ((tcp_off & 0x10) <<
6440 TX_BD_FLAGS_TCP6_OFF4_SHL);
6441 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6445 if (tcp_opt_len || (iph->ihl > 5)) {
6446 vlan_tag_flags |= ((iph->ihl - 5) +
6447 (tcp_opt_len >> 2)) << 8;
6453 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6454 if (pci_dma_mapping_error(bp->pdev, mapping)) {
6456 return NETDEV_TX_OK;
6459 tx_buf = &txr->tx_buf_ring[ring_prod];
6461 dma_unmap_addr_set(tx_buf, mapping, mapping);
6463 txbd = &txr->tx_desc_ring[ring_prod];
6465 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6466 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6467 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6468 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6470 last_frag = skb_shinfo(skb)->nr_frags;
6471 tx_buf->nr_frags = last_frag;
6472 tx_buf->is_gso = skb_is_gso(skb);
6474 for (i = 0; i < last_frag; i++) {
6475 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6477 prod = NEXT_TX_BD(prod);
6478 ring_prod = TX_RING_IDX(prod);
6479 txbd = &txr->tx_desc_ring[ring_prod];
6482 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
6483 len, PCI_DMA_TODEVICE);
6484 if (pci_dma_mapping_error(bp->pdev, mapping))
6486 dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
6489 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6490 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6491 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6492 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6495 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6497 prod = NEXT_TX_BD(prod);
6498 txr->tx_prod_bseq += skb->len;
6500 REG_WR16(bp, txr->tx_bidx_addr, prod);
6501 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
6505 txr->tx_prod = prod;
6507 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
6508 netif_tx_stop_queue(txq);
6509 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
6510 netif_tx_wake_queue(txq);
6513 return NETDEV_TX_OK;
6515 /* save value of frag that failed */
6518 /* start back at beginning and unmap skb */
6519 prod = txr->tx_prod;
6520 ring_prod = TX_RING_IDX(prod);
6521 tx_buf = &txr->tx_buf_ring[ring_prod];
6523 pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6524 skb_headlen(skb), PCI_DMA_TODEVICE);
6526 /* unmap remaining mapped pages */
6527 for (i = 0; i < last_frag; i++) {
6528 prod = NEXT_TX_BD(prod);
6529 ring_prod = TX_RING_IDX(prod);
6530 tx_buf = &txr->tx_buf_ring[ring_prod];
6531 pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
6532 skb_shinfo(skb)->frags[i].size,
6537 return NETDEV_TX_OK;
6540 /* Called with rtnl_lock */
6542 bnx2_close(struct net_device *dev)
6544 struct bnx2 *bp = netdev_priv(dev);
6546 cancel_work_sync(&bp->reset_task);
6548 bnx2_disable_int_sync(bp);
6549 bnx2_napi_disable(bp);
6550 del_timer_sync(&bp->timer);
6551 bnx2_shutdown_chip(bp);
6557 netif_carrier_off(bp->dev);
6558 bnx2_set_power_state(bp, PCI_D3hot);
6563 bnx2_save_stats(struct bnx2 *bp)
6565 u32 *hw_stats = (u32 *) bp->stats_blk;
6566 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
6569 /* The 1st 10 counters are 64-bit counters */
6570 for (i = 0; i < 20; i += 2) {
6574 hi = temp_stats[i] + hw_stats[i];
6575 lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
6576 if (lo > 0xffffffff)
6579 temp_stats[i + 1] = lo & 0xffffffff;
6582 for ( ; i < sizeof(struct statistics_block) / 4; i++)
6583 temp_stats[i] += hw_stats[i];
6586 #define GET_64BIT_NET_STATS64(ctr) \
6587 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6588 (unsigned long) (ctr##_lo)
6590 #define GET_64BIT_NET_STATS32(ctr) \
6593 #if (BITS_PER_LONG == 64)
6594 #define GET_64BIT_NET_STATS(ctr) \
6595 GET_64BIT_NET_STATS64(bp->stats_blk->ctr) + \
6596 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr)
6598 #define GET_64BIT_NET_STATS(ctr) \
6599 GET_64BIT_NET_STATS32(bp->stats_blk->ctr) + \
6600 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr)
6603 #define GET_32BIT_NET_STATS(ctr) \
6604 (unsigned long) (bp->stats_blk->ctr + \
6605 bp->temp_stats_blk->ctr)
6607 static struct net_device_stats *
6608 bnx2_get_stats(struct net_device *dev)
6610 struct bnx2 *bp = netdev_priv(dev);
6611 struct net_device_stats *net_stats = &dev->stats;
6613 if (bp->stats_blk == NULL) {
6616 net_stats->rx_packets =
6617 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6618 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6619 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
6621 net_stats->tx_packets =
6622 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6623 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6624 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
6626 net_stats->rx_bytes =
6627 GET_64BIT_NET_STATS(stat_IfHCInOctets);
6629 net_stats->tx_bytes =
6630 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
6632 net_stats->multicast =
6633 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
6635 net_stats->collisions =
6636 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
6638 net_stats->rx_length_errors =
6639 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6640 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
6642 net_stats->rx_over_errors =
6643 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6644 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
6646 net_stats->rx_frame_errors =
6647 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
6649 net_stats->rx_crc_errors =
6650 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
6652 net_stats->rx_errors = net_stats->rx_length_errors +
6653 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6654 net_stats->rx_crc_errors;
6656 net_stats->tx_aborted_errors =
6657 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6658 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
6660 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6661 (CHIP_ID(bp) == CHIP_ID_5708_A0))
6662 net_stats->tx_carrier_errors = 0;
6664 net_stats->tx_carrier_errors =
6665 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
6668 net_stats->tx_errors =
6669 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
6670 net_stats->tx_aborted_errors +
6671 net_stats->tx_carrier_errors;
6673 net_stats->rx_missed_errors =
6674 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6675 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6676 GET_32BIT_NET_STATS(stat_FwRxDrop);
6681 /* All ethtool functions called with rtnl_lock */
6684 bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6686 struct bnx2 *bp = netdev_priv(dev);
6687 int support_serdes = 0, support_copper = 0;
6689 cmd->supported = SUPPORTED_Autoneg;
6690 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6693 } else if (bp->phy_port == PORT_FIBRE)
6698 if (support_serdes) {
6699 cmd->supported |= SUPPORTED_1000baseT_Full |
6701 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
6702 cmd->supported |= SUPPORTED_2500baseX_Full;
6705 if (support_copper) {
6706 cmd->supported |= SUPPORTED_10baseT_Half |
6707 SUPPORTED_10baseT_Full |
6708 SUPPORTED_100baseT_Half |
6709 SUPPORTED_100baseT_Full |
6710 SUPPORTED_1000baseT_Full |
6715 spin_lock_bh(&bp->phy_lock);
6716 cmd->port = bp->phy_port;
6717 cmd->advertising = bp->advertising;
6719 if (bp->autoneg & AUTONEG_SPEED) {
6720 cmd->autoneg = AUTONEG_ENABLE;
6723 cmd->autoneg = AUTONEG_DISABLE;
6726 if (netif_carrier_ok(dev)) {
6727 cmd->speed = bp->line_speed;
6728 cmd->duplex = bp->duplex;
6734 spin_unlock_bh(&bp->phy_lock);
6736 cmd->transceiver = XCVR_INTERNAL;
6737 cmd->phy_address = bp->phy_addr;
6743 bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6745 struct bnx2 *bp = netdev_priv(dev);
6746 u8 autoneg = bp->autoneg;
6747 u8 req_duplex = bp->req_duplex;
6748 u16 req_line_speed = bp->req_line_speed;
6749 u32 advertising = bp->advertising;
6752 spin_lock_bh(&bp->phy_lock);
6754 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6755 goto err_out_unlock;
6757 if (cmd->port != bp->phy_port &&
6758 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
6759 goto err_out_unlock;
6761 /* If device is down, we can store the settings only if the user
6762 * is setting the currently active port.
6764 if (!netif_running(dev) && cmd->port != bp->phy_port)
6765 goto err_out_unlock;
6767 if (cmd->autoneg == AUTONEG_ENABLE) {
6768 autoneg |= AUTONEG_SPEED;
6770 advertising = cmd->advertising;
6771 if (cmd->port == PORT_TP) {
6772 advertising &= ETHTOOL_ALL_COPPER_SPEED;
6774 advertising = ETHTOOL_ALL_COPPER_SPEED;
6776 advertising &= ETHTOOL_ALL_FIBRE_SPEED;
6778 advertising = ETHTOOL_ALL_FIBRE_SPEED;
6780 advertising |= ADVERTISED_Autoneg;
6783 if (cmd->port == PORT_FIBRE) {
6784 if ((cmd->speed != SPEED_1000 &&
6785 cmd->speed != SPEED_2500) ||
6786 (cmd->duplex != DUPLEX_FULL))
6787 goto err_out_unlock;
6789 if (cmd->speed == SPEED_2500 &&
6790 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
6791 goto err_out_unlock;
6793 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6794 goto err_out_unlock;
6796 autoneg &= ~AUTONEG_SPEED;
6797 req_line_speed = cmd->speed;
6798 req_duplex = cmd->duplex;
6802 bp->autoneg = autoneg;
6803 bp->advertising = advertising;
6804 bp->req_line_speed = req_line_speed;
6805 bp->req_duplex = req_duplex;
6808 /* If device is down, the new settings will be picked up when it is
6811 if (netif_running(dev))
6812 err = bnx2_setup_phy(bp, cmd->port);
6815 spin_unlock_bh(&bp->phy_lock);
6821 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6823 struct bnx2 *bp = netdev_priv(dev);
6825 strcpy(info->driver, DRV_MODULE_NAME);
6826 strcpy(info->version, DRV_MODULE_VERSION);
6827 strcpy(info->bus_info, pci_name(bp->pdev));
6828 strcpy(info->fw_version, bp->fw_version);
6831 #define BNX2_REGDUMP_LEN (32 * 1024)
6834 bnx2_get_regs_len(struct net_device *dev)
6836 return BNX2_REGDUMP_LEN;
6840 bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6842 u32 *p = _p, i, offset;
6844 struct bnx2 *bp = netdev_priv(dev);
6845 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6846 0x0800, 0x0880, 0x0c00, 0x0c10,
6847 0x0c30, 0x0d08, 0x1000, 0x101c,
6848 0x1040, 0x1048, 0x1080, 0x10a4,
6849 0x1400, 0x1490, 0x1498, 0x14f0,
6850 0x1500, 0x155c, 0x1580, 0x15dc,
6851 0x1600, 0x1658, 0x1680, 0x16d8,
6852 0x1800, 0x1820, 0x1840, 0x1854,
6853 0x1880, 0x1894, 0x1900, 0x1984,
6854 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6855 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6856 0x2000, 0x2030, 0x23c0, 0x2400,
6857 0x2800, 0x2820, 0x2830, 0x2850,
6858 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6859 0x3c00, 0x3c94, 0x4000, 0x4010,
6860 0x4080, 0x4090, 0x43c0, 0x4458,
6861 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6862 0x4fc0, 0x5010, 0x53c0, 0x5444,
6863 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6864 0x5fc0, 0x6000, 0x6400, 0x6428,
6865 0x6800, 0x6848, 0x684c, 0x6860,
6866 0x6888, 0x6910, 0x8000 };
6870 memset(p, 0, BNX2_REGDUMP_LEN);
6872 if (!netif_running(bp->dev))
6876 offset = reg_boundaries[0];
6878 while (offset < BNX2_REGDUMP_LEN) {
6879 *p++ = REG_RD(bp, offset);
6881 if (offset == reg_boundaries[i + 1]) {
6882 offset = reg_boundaries[i + 2];
6883 p = (u32 *) (orig_p + offset);
6890 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6892 struct bnx2 *bp = netdev_priv(dev);
6894 if (bp->flags & BNX2_FLAG_NO_WOL) {
6899 wol->supported = WAKE_MAGIC;
6901 wol->wolopts = WAKE_MAGIC;
6905 memset(&wol->sopass, 0, sizeof(wol->sopass));
6909 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6911 struct bnx2 *bp = netdev_priv(dev);
6913 if (wol->wolopts & ~WAKE_MAGIC)
6916 if (wol->wolopts & WAKE_MAGIC) {
6917 if (bp->flags & BNX2_FLAG_NO_WOL)
6929 bnx2_nway_reset(struct net_device *dev)
6931 struct bnx2 *bp = netdev_priv(dev);
6934 if (!netif_running(dev))
6937 if (!(bp->autoneg & AUTONEG_SPEED)) {
6941 spin_lock_bh(&bp->phy_lock);
6943 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
6946 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6947 spin_unlock_bh(&bp->phy_lock);
6951 /* Force a link down visible on the other side */
6952 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
6953 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
6954 spin_unlock_bh(&bp->phy_lock);
6958 spin_lock_bh(&bp->phy_lock);
6960 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
6961 bp->serdes_an_pending = 1;
6962 mod_timer(&bp->timer, jiffies + bp->current_interval);
6965 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
6966 bmcr &= ~BMCR_LOOPBACK;
6967 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
6969 spin_unlock_bh(&bp->phy_lock);
6975 bnx2_get_link(struct net_device *dev)
6977 struct bnx2 *bp = netdev_priv(dev);
6983 bnx2_get_eeprom_len(struct net_device *dev)
6985 struct bnx2 *bp = netdev_priv(dev);
6987 if (bp->flash_info == NULL)
6990 return (int) bp->flash_size;
6994 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6997 struct bnx2 *bp = netdev_priv(dev);
7000 if (!netif_running(dev))
7003 /* parameters already validated in ethtool_get_eeprom */
7005 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
7011 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
7014 struct bnx2 *bp = netdev_priv(dev);
7017 if (!netif_running(dev))
7020 /* parameters already validated in ethtool_set_eeprom */
7022 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
7028 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7030 struct bnx2 *bp = netdev_priv(dev);
7032 memset(coal, 0, sizeof(struct ethtool_coalesce));
7034 coal->rx_coalesce_usecs = bp->rx_ticks;
7035 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7036 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7037 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7039 coal->tx_coalesce_usecs = bp->tx_ticks;
7040 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7041 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7042 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7044 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7050 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7052 struct bnx2 *bp = netdev_priv(dev);
7054 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
7055 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
7057 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
7058 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
7060 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
7061 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
7063 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
7064 if (bp->rx_quick_cons_trip_int > 0xff)
7065 bp->rx_quick_cons_trip_int = 0xff;
7067 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
7068 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
7070 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
7071 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
7073 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
7074 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
7076 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
7077 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
7080 bp->stats_ticks = coal->stats_block_coalesce_usecs;
7081 if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
7082 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
7083 bp->stats_ticks = USEC_PER_SEC;
7085 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
7086 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7087 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7089 if (netif_running(bp->dev)) {
7090 bnx2_netif_stop(bp, true);
7091 bnx2_init_nic(bp, 0);
7092 bnx2_netif_start(bp, true);
7099 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7101 struct bnx2 *bp = netdev_priv(dev);
7103 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
7104 ering->rx_mini_max_pending = 0;
7105 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
7107 ering->rx_pending = bp->rx_ring_size;
7108 ering->rx_mini_pending = 0;
7109 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
7111 ering->tx_max_pending = MAX_TX_DESC_CNT;
7112 ering->tx_pending = bp->tx_ring_size;
7116 bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
7118 if (netif_running(bp->dev)) {
7119 /* Reset will erase chipset stats; save them */
7120 bnx2_save_stats(bp);
7122 bnx2_netif_stop(bp, true);
7123 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
7128 bnx2_set_rx_ring_size(bp, rx);
7129 bp->tx_ring_size = tx;
7131 if (netif_running(bp->dev)) {
7134 rc = bnx2_alloc_mem(bp);
7136 rc = bnx2_init_nic(bp, 0);
7139 bnx2_napi_enable(bp);
7144 mutex_lock(&bp->cnic_lock);
7145 /* Let cnic know about the new status block. */
7146 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
7147 bnx2_setup_cnic_irq_info(bp);
7148 mutex_unlock(&bp->cnic_lock);
7150 bnx2_netif_start(bp, true);
7156 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7158 struct bnx2 *bp = netdev_priv(dev);
7161 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7162 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7163 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7167 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7172 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7174 struct bnx2 *bp = netdev_priv(dev);
7176 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7177 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7178 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7182 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7184 struct bnx2 *bp = netdev_priv(dev);
7186 bp->req_flow_ctrl = 0;
7187 if (epause->rx_pause)
7188 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7189 if (epause->tx_pause)
7190 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7192 if (epause->autoneg) {
7193 bp->autoneg |= AUTONEG_FLOW_CTRL;
7196 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7199 if (netif_running(dev)) {
7200 spin_lock_bh(&bp->phy_lock);
7201 bnx2_setup_phy(bp, bp->phy_port);
7202 spin_unlock_bh(&bp->phy_lock);
7209 bnx2_get_rx_csum(struct net_device *dev)
7211 struct bnx2 *bp = netdev_priv(dev);
7217 bnx2_set_rx_csum(struct net_device *dev, u32 data)
7219 struct bnx2 *bp = netdev_priv(dev);
7226 bnx2_set_tso(struct net_device *dev, u32 data)
7228 struct bnx2 *bp = netdev_priv(dev);
7231 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7232 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7233 dev->features |= NETIF_F_TSO6;
7235 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7241 char string[ETH_GSTRING_LEN];
7242 } bnx2_stats_str_arr[] = {
7244 { "rx_error_bytes" },
7246 { "tx_error_bytes" },
7247 { "rx_ucast_packets" },
7248 { "rx_mcast_packets" },
7249 { "rx_bcast_packets" },
7250 { "tx_ucast_packets" },
7251 { "tx_mcast_packets" },
7252 { "tx_bcast_packets" },
7253 { "tx_mac_errors" },
7254 { "tx_carrier_errors" },
7255 { "rx_crc_errors" },
7256 { "rx_align_errors" },
7257 { "tx_single_collisions" },
7258 { "tx_multi_collisions" },
7260 { "tx_excess_collisions" },
7261 { "tx_late_collisions" },
7262 { "tx_total_collisions" },
7265 { "rx_undersize_packets" },
7266 { "rx_oversize_packets" },
7267 { "rx_64_byte_packets" },
7268 { "rx_65_to_127_byte_packets" },
7269 { "rx_128_to_255_byte_packets" },
7270 { "rx_256_to_511_byte_packets" },
7271 { "rx_512_to_1023_byte_packets" },
7272 { "rx_1024_to_1522_byte_packets" },
7273 { "rx_1523_to_9022_byte_packets" },
7274 { "tx_64_byte_packets" },
7275 { "tx_65_to_127_byte_packets" },
7276 { "tx_128_to_255_byte_packets" },
7277 { "tx_256_to_511_byte_packets" },
7278 { "tx_512_to_1023_byte_packets" },
7279 { "tx_1024_to_1522_byte_packets" },
7280 { "tx_1523_to_9022_byte_packets" },
7281 { "rx_xon_frames" },
7282 { "rx_xoff_frames" },
7283 { "tx_xon_frames" },
7284 { "tx_xoff_frames" },
7285 { "rx_mac_ctrl_frames" },
7286 { "rx_filtered_packets" },
7287 { "rx_ftq_discards" },
7289 { "rx_fw_discards" },
7292 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7293 sizeof(bnx2_stats_str_arr[0]))
7295 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7297 static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
7298 STATS_OFFSET32(stat_IfHCInOctets_hi),
7299 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7300 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7301 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7302 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7303 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7304 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7305 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7306 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7307 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7308 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
7309 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7310 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7311 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7312 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7313 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7314 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7315 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7316 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7317 STATS_OFFSET32(stat_EtherStatsCollisions),
7318 STATS_OFFSET32(stat_EtherStatsFragments),
7319 STATS_OFFSET32(stat_EtherStatsJabbers),
7320 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7321 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7322 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7323 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7324 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7325 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7326 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7327 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7328 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7329 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7330 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7331 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7332 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7333 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7334 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7335 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7336 STATS_OFFSET32(stat_XonPauseFramesReceived),
7337 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7338 STATS_OFFSET32(stat_OutXonSent),
7339 STATS_OFFSET32(stat_OutXoffSent),
7340 STATS_OFFSET32(stat_MacControlFramesReceived),
7341 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
7342 STATS_OFFSET32(stat_IfInFTQDiscards),
7343 STATS_OFFSET32(stat_IfInMBUFDiscards),
7344 STATS_OFFSET32(stat_FwRxDrop),
7347 /* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7348 * skipped because of errata.
7350 static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
7351 8,0,8,8,8,8,8,8,8,8,
7352 4,0,4,4,4,4,4,4,4,4,
7353 4,4,4,4,4,4,4,4,4,4,
7354 4,4,4,4,4,4,4,4,4,4,
7358 static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7359 8,0,8,8,8,8,8,8,8,8,
7360 4,4,4,4,4,4,4,4,4,4,
7361 4,4,4,4,4,4,4,4,4,4,
7362 4,4,4,4,4,4,4,4,4,4,
7366 #define BNX2_NUM_TESTS 6
7369 char string[ETH_GSTRING_LEN];
7370 } bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7371 { "register_test (offline)" },
7372 { "memory_test (offline)" },
7373 { "loopback_test (offline)" },
7374 { "nvram_test (online)" },
7375 { "interrupt_test (online)" },
7376 { "link_test (online)" },
7380 bnx2_get_sset_count(struct net_device *dev, int sset)
7384 return BNX2_NUM_TESTS;
7386 return BNX2_NUM_STATS;
7393 bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7395 struct bnx2 *bp = netdev_priv(dev);
7397 bnx2_set_power_state(bp, PCI_D0);
7399 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7400 if (etest->flags & ETH_TEST_FL_OFFLINE) {
7403 bnx2_netif_stop(bp, true);
7404 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7407 if (bnx2_test_registers(bp) != 0) {
7409 etest->flags |= ETH_TEST_FL_FAILED;
7411 if (bnx2_test_memory(bp) != 0) {
7413 etest->flags |= ETH_TEST_FL_FAILED;
7415 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
7416 etest->flags |= ETH_TEST_FL_FAILED;
7418 if (!netif_running(bp->dev))
7419 bnx2_shutdown_chip(bp);
7421 bnx2_init_nic(bp, 1);
7422 bnx2_netif_start(bp, true);
7425 /* wait for link up */
7426 for (i = 0; i < 7; i++) {
7429 msleep_interruptible(1000);
7433 if (bnx2_test_nvram(bp) != 0) {
7435 etest->flags |= ETH_TEST_FL_FAILED;
7437 if (bnx2_test_intr(bp) != 0) {
7439 etest->flags |= ETH_TEST_FL_FAILED;
7442 if (bnx2_test_link(bp) != 0) {
7444 etest->flags |= ETH_TEST_FL_FAILED;
7447 if (!netif_running(bp->dev))
7448 bnx2_set_power_state(bp, PCI_D3hot);
7452 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7454 switch (stringset) {
7456 memcpy(buf, bnx2_stats_str_arr,
7457 sizeof(bnx2_stats_str_arr));
7460 memcpy(buf, bnx2_tests_str_arr,
7461 sizeof(bnx2_tests_str_arr));
7467 bnx2_get_ethtool_stats(struct net_device *dev,
7468 struct ethtool_stats *stats, u64 *buf)
7470 struct bnx2 *bp = netdev_priv(dev);
7472 u32 *hw_stats = (u32 *) bp->stats_blk;
7473 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
7474 u8 *stats_len_arr = NULL;
7476 if (hw_stats == NULL) {
7477 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7481 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7482 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7483 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7484 (CHIP_ID(bp) == CHIP_ID_5708_A0))
7485 stats_len_arr = bnx2_5706_stats_len_arr;
7487 stats_len_arr = bnx2_5708_stats_len_arr;
7489 for (i = 0; i < BNX2_NUM_STATS; i++) {
7490 unsigned long offset;
7492 if (stats_len_arr[i] == 0) {
7493 /* skip this counter */
7498 offset = bnx2_stats_offset_arr[i];
7499 if (stats_len_arr[i] == 4) {
7500 /* 4-byte counter */
7501 buf[i] = (u64) *(hw_stats + offset) +
7502 *(temp_stats + offset);
7505 /* 8-byte counter */
7506 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7507 *(hw_stats + offset + 1) +
7508 (((u64) *(temp_stats + offset)) << 32) +
7509 *(temp_stats + offset + 1);
7514 bnx2_phys_id(struct net_device *dev, u32 data)
7516 struct bnx2 *bp = netdev_priv(dev);
7520 bnx2_set_power_state(bp, PCI_D0);
7525 save = REG_RD(bp, BNX2_MISC_CFG);
7526 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7528 for (i = 0; i < (data * 2); i++) {
7530 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7533 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7534 BNX2_EMAC_LED_1000MB_OVERRIDE |
7535 BNX2_EMAC_LED_100MB_OVERRIDE |
7536 BNX2_EMAC_LED_10MB_OVERRIDE |
7537 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7538 BNX2_EMAC_LED_TRAFFIC);
7540 msleep_interruptible(500);
7541 if (signal_pending(current))
7544 REG_WR(bp, BNX2_EMAC_LED, 0);
7545 REG_WR(bp, BNX2_MISC_CFG, save);
7547 if (!netif_running(dev))
7548 bnx2_set_power_state(bp, PCI_D3hot);
7554 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7556 struct bnx2 *bp = netdev_priv(dev);
7558 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7559 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7561 return (ethtool_op_set_tx_csum(dev, data));
7564 static const struct ethtool_ops bnx2_ethtool_ops = {
7565 .get_settings = bnx2_get_settings,
7566 .set_settings = bnx2_set_settings,
7567 .get_drvinfo = bnx2_get_drvinfo,
7568 .get_regs_len = bnx2_get_regs_len,
7569 .get_regs = bnx2_get_regs,
7570 .get_wol = bnx2_get_wol,
7571 .set_wol = bnx2_set_wol,
7572 .nway_reset = bnx2_nway_reset,
7573 .get_link = bnx2_get_link,
7574 .get_eeprom_len = bnx2_get_eeprom_len,
7575 .get_eeprom = bnx2_get_eeprom,
7576 .set_eeprom = bnx2_set_eeprom,
7577 .get_coalesce = bnx2_get_coalesce,
7578 .set_coalesce = bnx2_set_coalesce,
7579 .get_ringparam = bnx2_get_ringparam,
7580 .set_ringparam = bnx2_set_ringparam,
7581 .get_pauseparam = bnx2_get_pauseparam,
7582 .set_pauseparam = bnx2_set_pauseparam,
7583 .get_rx_csum = bnx2_get_rx_csum,
7584 .set_rx_csum = bnx2_set_rx_csum,
7585 .set_tx_csum = bnx2_set_tx_csum,
7586 .set_sg = ethtool_op_set_sg,
7587 .set_tso = bnx2_set_tso,
7588 .self_test = bnx2_self_test,
7589 .get_strings = bnx2_get_strings,
7590 .phys_id = bnx2_phys_id,
7591 .get_ethtool_stats = bnx2_get_ethtool_stats,
7592 .get_sset_count = bnx2_get_sset_count,
7595 /* Called with rtnl_lock */
7597 bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7599 struct mii_ioctl_data *data = if_mii(ifr);
7600 struct bnx2 *bp = netdev_priv(dev);
7605 data->phy_id = bp->phy_addr;
7611 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7614 if (!netif_running(dev))
7617 spin_lock_bh(&bp->phy_lock);
7618 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
7619 spin_unlock_bh(&bp->phy_lock);
7621 data->val_out = mii_regval;
7627 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7630 if (!netif_running(dev))
7633 spin_lock_bh(&bp->phy_lock);
7634 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
7635 spin_unlock_bh(&bp->phy_lock);
7646 /* Called with rtnl_lock */
7648 bnx2_change_mac_addr(struct net_device *dev, void *p)
7650 struct sockaddr *addr = p;
7651 struct bnx2 *bp = netdev_priv(dev);
7653 if (!is_valid_ether_addr(addr->sa_data))
7656 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7657 if (netif_running(dev))
7658 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7663 /* Called with rtnl_lock */
7665 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7667 struct bnx2 *bp = netdev_priv(dev);
7669 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7670 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7674 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7677 #ifdef CONFIG_NET_POLL_CONTROLLER
7679 poll_bnx2(struct net_device *dev)
7681 struct bnx2 *bp = netdev_priv(dev);
7684 for (i = 0; i < bp->irq_nvecs; i++) {
7685 struct bnx2_irq *irq = &bp->irq_tbl[i];
7687 disable_irq(irq->vector);
7688 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7689 enable_irq(irq->vector);
7694 static void __devinit
7695 bnx2_get_5709_media(struct bnx2 *bp)
7697 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7698 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7701 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7703 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
7704 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7708 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7709 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7711 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7713 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7718 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7726 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7732 static void __devinit
7733 bnx2_get_pci_speed(struct bnx2 *bp)
7737 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7738 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7741 bp->flags |= BNX2_FLAG_PCIX;
7743 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7745 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7747 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7748 bp->bus_speed_mhz = 133;
7751 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7752 bp->bus_speed_mhz = 100;
7755 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7756 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7757 bp->bus_speed_mhz = 66;
7760 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7761 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7762 bp->bus_speed_mhz = 50;
7765 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7766 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7767 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7768 bp->bus_speed_mhz = 33;
7773 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7774 bp->bus_speed_mhz = 66;
7776 bp->bus_speed_mhz = 33;
7779 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7780 bp->flags |= BNX2_FLAG_PCI_32BIT;
7784 static void __devinit
7785 bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7789 unsigned int block_end, rosize, len;
7791 #define BNX2_VPD_NVRAM_OFFSET 0x300
7792 #define BNX2_VPD_LEN 128
7793 #define BNX2_MAX_VER_SLEN 30
7795 data = kmalloc(256, GFP_KERNEL);
7799 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7804 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7805 data[i] = data[i + BNX2_VPD_LEN + 3];
7806 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7807 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7808 data[i + 3] = data[i + BNX2_VPD_LEN];
7811 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7815 rosize = pci_vpd_lrdt_size(&data[i]);
7816 i += PCI_VPD_LRDT_TAG_SIZE;
7817 block_end = i + rosize;
7819 if (block_end > BNX2_VPD_LEN)
7822 j = pci_vpd_find_info_keyword(data, i, rosize,
7823 PCI_VPD_RO_KEYWORD_MFR_ID);
7827 len = pci_vpd_info_field_size(&data[j]);
7829 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7830 if (j + len > block_end || len != 4 ||
7831 memcmp(&data[j], "1028", 4))
7834 j = pci_vpd_find_info_keyword(data, i, rosize,
7835 PCI_VPD_RO_KEYWORD_VENDOR0);
7839 len = pci_vpd_info_field_size(&data[j]);
7841 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7842 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
7845 memcpy(bp->fw_version, &data[j], len);
7846 bp->fw_version[len] = ' ';
7852 static int __devinit
7853 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7856 unsigned long mem_len;
7859 u64 dma_mask, persist_dma_mask;
7861 SET_NETDEV_DEV(dev, &pdev->dev);
7862 bp = netdev_priv(dev);
7867 bp->temp_stats_blk =
7868 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7870 if (bp->temp_stats_blk == NULL) {
7875 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7876 rc = pci_enable_device(pdev);
7878 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
7882 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7884 "Cannot find PCI device base address, aborting\n");
7886 goto err_out_disable;
7889 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7891 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
7892 goto err_out_disable;
7895 pci_set_master(pdev);
7896 pci_save_state(pdev);
7898 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7899 if (bp->pm_cap == 0) {
7901 "Cannot find power management capability, aborting\n");
7903 goto err_out_release;
7909 spin_lock_init(&bp->phy_lock);
7910 spin_lock_init(&bp->indirect_lock);
7912 mutex_init(&bp->cnic_lock);
7914 INIT_WORK(&bp->reset_task, bnx2_reset_task);
7916 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7917 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7918 dev->mem_end = dev->mem_start + mem_len;
7919 dev->irq = pdev->irq;
7921 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7924 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
7926 goto err_out_release;
7929 /* Configure byte swap and enable write to the reg_window registers.
7930 * Rely on CPU to do target byte swapping on big endian systems
7931 * The chip's target access swapping will not swap all accesses
7933 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7934 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7935 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7937 bnx2_set_power_state(bp, PCI_D0);
7939 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7941 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7942 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7944 "Cannot find PCIE capability, aborting\n");
7948 bp->flags |= BNX2_FLAG_PCIE;
7949 if (CHIP_REV(bp) == CHIP_REV_Ax)
7950 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7952 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7953 if (bp->pcix_cap == 0) {
7955 "Cannot find PCIX capability, aborting\n");
7959 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7962 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7963 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7964 bp->flags |= BNX2_FLAG_MSIX_CAP;
7967 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7968 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7969 bp->flags |= BNX2_FLAG_MSI_CAP;
7972 /* 5708 cannot support DMA addresses > 40-bit. */
7973 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7974 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7976 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7978 /* Configure DMA attributes. */
7979 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7980 dev->features |= NETIF_F_HIGHDMA;
7981 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7984 "pci_set_consistent_dma_mask failed, aborting\n");
7987 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7988 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
7992 if (!(bp->flags & BNX2_FLAG_PCIE))
7993 bnx2_get_pci_speed(bp);
7995 /* 5706A0 may falsely detect SERR and PERR. */
7996 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7997 reg = REG_RD(bp, PCI_COMMAND);
7998 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7999 REG_WR(bp, PCI_COMMAND, reg);
8001 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
8002 !(bp->flags & BNX2_FLAG_PCIX)) {
8005 "5706 A1 can only be used in a PCIX bus, aborting\n");
8009 bnx2_init_nvram(bp);
8011 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
8013 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
8014 BNX2_SHM_HDR_SIGNATURE_SIG) {
8015 u32 off = PCI_FUNC(pdev->devfn) << 2;
8017 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
8019 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
8021 /* Get the permanent MAC address. First we need to make sure the
8022 * firmware is actually running.
8024 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
8026 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8027 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
8028 dev_err(&pdev->dev, "Firmware not running, aborting\n");
8033 bnx2_read_vpd_fw_ver(bp);
8035 j = strlen(bp->fw_version);
8036 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
8037 for (i = 0; i < 3 && j < 24; i++) {
8041 bp->fw_version[j++] = 'b';
8042 bp->fw_version[j++] = 'c';
8043 bp->fw_version[j++] = ' ';
8045 num = (u8) (reg >> (24 - (i * 8)));
8046 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8047 if (num >= k || !skip0 || k == 1) {
8048 bp->fw_version[j++] = (num / k) + '0';
8053 bp->fw_version[j++] = '.';
8055 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
8056 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8059 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
8060 bp->flags |= BNX2_FLAG_ASF_ENABLE;
8062 for (i = 0; i < 30; i++) {
8063 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8064 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8069 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
8070 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8071 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8072 reg != BNX2_CONDITION_MFW_RUN_NONE) {
8073 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
8076 bp->fw_version[j++] = ' ';
8077 for (i = 0; i < 3 && j < 28; i++) {
8078 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
8080 memcpy(&bp->fw_version[j], ®, 4);
8085 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
8086 bp->mac_addr[0] = (u8) (reg >> 8);
8087 bp->mac_addr[1] = (u8) reg;
8089 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
8090 bp->mac_addr[2] = (u8) (reg >> 24);
8091 bp->mac_addr[3] = (u8) (reg >> 16);
8092 bp->mac_addr[4] = (u8) (reg >> 8);
8093 bp->mac_addr[5] = (u8) reg;
8095 bp->tx_ring_size = MAX_TX_DESC_CNT;
8096 bnx2_set_rx_ring_size(bp, 255);
8100 bp->tx_quick_cons_trip_int = 2;
8101 bp->tx_quick_cons_trip = 20;
8102 bp->tx_ticks_int = 18;
8105 bp->rx_quick_cons_trip_int = 2;
8106 bp->rx_quick_cons_trip = 12;
8107 bp->rx_ticks_int = 18;
8110 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
8112 bp->current_interval = BNX2_TIMER_INTERVAL;
8116 /* Disable WOL support if we are running on a SERDES chip. */
8117 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8118 bnx2_get_5709_media(bp);
8119 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
8120 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
8122 bp->phy_port = PORT_TP;
8123 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
8124 bp->phy_port = PORT_FIBRE;
8125 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
8126 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
8127 bp->flags |= BNX2_FLAG_NO_WOL;
8130 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8131 /* Don't do parallel detect on this board because of
8132 * some board problems. The link will not go down
8133 * if we do parallel detect.
8135 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8136 pdev->subsystem_device == 0x310c)
8137 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8140 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
8141 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
8143 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8144 CHIP_NUM(bp) == CHIP_NUM_5708)
8145 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
8146 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8147 (CHIP_REV(bp) == CHIP_REV_Ax ||
8148 CHIP_REV(bp) == CHIP_REV_Bx))
8149 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
8151 bnx2_init_fw_cap(bp);
8153 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8154 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
8155 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8156 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
8157 bp->flags |= BNX2_FLAG_NO_WOL;
8161 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8162 bp->tx_quick_cons_trip_int =
8163 bp->tx_quick_cons_trip;
8164 bp->tx_ticks_int = bp->tx_ticks;
8165 bp->rx_quick_cons_trip_int =
8166 bp->rx_quick_cons_trip;
8167 bp->rx_ticks_int = bp->rx_ticks;
8168 bp->comp_prod_trip_int = bp->comp_prod_trip;
8169 bp->com_ticks_int = bp->com_ticks;
8170 bp->cmd_ticks_int = bp->cmd_ticks;
8173 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8175 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8176 * with byte enables disabled on the unused 32-bit word. This is legal
8177 * but causes problems on the AMD 8132 which will eventually stop
8178 * responding after a while.
8180 * AMD believes this incompatibility is unique to the 5706, and
8181 * prefers to locally disable MSI rather than globally disabling it.
8183 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8184 struct pci_dev *amd_8132 = NULL;
8186 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8187 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8190 if (amd_8132->revision >= 0x10 &&
8191 amd_8132->revision <= 0x13) {
8193 pci_dev_put(amd_8132);
8199 bnx2_set_default_link(bp);
8200 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8202 init_timer(&bp->timer);
8203 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
8204 bp->timer.data = (unsigned long) bp;
8205 bp->timer.function = bnx2_timer;
8211 iounmap(bp->regview);
8216 pci_release_regions(pdev);
8219 pci_disable_device(pdev);
8220 pci_set_drvdata(pdev, NULL);
8226 static char * __devinit
8227 bnx2_bus_string(struct bnx2 *bp, char *str)
8231 if (bp->flags & BNX2_FLAG_PCIE) {
8232 s += sprintf(s, "PCI Express");
8234 s += sprintf(s, "PCI");
8235 if (bp->flags & BNX2_FLAG_PCIX)
8236 s += sprintf(s, "-X");
8237 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8238 s += sprintf(s, " 32-bit");
8240 s += sprintf(s, " 64-bit");
8241 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8247 bnx2_del_napi(struct bnx2 *bp)
8251 for (i = 0; i < bp->irq_nvecs; i++)
8252 netif_napi_del(&bp->bnx2_napi[i].napi);
8256 bnx2_init_napi(struct bnx2 *bp)
8260 for (i = 0; i < bp->irq_nvecs; i++) {
8261 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8262 int (*poll)(struct napi_struct *, int);
8267 poll = bnx2_poll_msix;
8269 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8274 static const struct net_device_ops bnx2_netdev_ops = {
8275 .ndo_open = bnx2_open,
8276 .ndo_start_xmit = bnx2_start_xmit,
8277 .ndo_stop = bnx2_close,
8278 .ndo_get_stats = bnx2_get_stats,
8279 .ndo_set_rx_mode = bnx2_set_rx_mode,
8280 .ndo_do_ioctl = bnx2_ioctl,
8281 .ndo_validate_addr = eth_validate_addr,
8282 .ndo_set_mac_address = bnx2_change_mac_addr,
8283 .ndo_change_mtu = bnx2_change_mtu,
8284 .ndo_tx_timeout = bnx2_tx_timeout,
8286 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
8288 #ifdef CONFIG_NET_POLL_CONTROLLER
8289 .ndo_poll_controller = poll_bnx2,
8293 static void inline vlan_features_add(struct net_device *dev, unsigned long flags)
8296 dev->vlan_features |= flags;
8300 static int __devinit
8301 bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
8303 static int version_printed = 0;
8304 struct net_device *dev = NULL;
8309 if (version_printed++ == 0)
8310 pr_info("%s", version);
8312 /* dev zeroed in init_etherdev */
8313 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
8318 rc = bnx2_init_board(pdev, dev);
8324 dev->netdev_ops = &bnx2_netdev_ops;
8325 dev->watchdog_timeo = TX_TIMEOUT;
8326 dev->ethtool_ops = &bnx2_ethtool_ops;
8328 bp = netdev_priv(dev);
8330 pci_set_drvdata(pdev, dev);
8332 rc = bnx2_request_firmware(bp);
8336 memcpy(dev->dev_addr, bp->mac_addr, 6);
8337 memcpy(dev->perm_addr, bp->mac_addr, 6);
8339 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
8340 vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
8341 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8342 dev->features |= NETIF_F_IPV6_CSUM;
8343 vlan_features_add(dev, NETIF_F_IPV6_CSUM);
8346 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8348 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
8349 vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
8350 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
8351 dev->features |= NETIF_F_TSO6;
8352 vlan_features_add(dev, NETIF_F_TSO6);
8354 if ((rc = register_netdev(dev))) {
8355 dev_err(&pdev->dev, "Cannot register net device\n");
8359 netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
8360 board_info[ent->driver_data].name,
8361 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
8362 ((CHIP_ID(bp) & 0x0ff0) >> 4),
8363 bnx2_bus_string(bp, str),
8365 bp->pdev->irq, dev->dev_addr);
8370 if (bp->mips_firmware)
8371 release_firmware(bp->mips_firmware);
8372 if (bp->rv2p_firmware)
8373 release_firmware(bp->rv2p_firmware);
8376 iounmap(bp->regview);
8377 pci_release_regions(pdev);
8378 pci_disable_device(pdev);
8379 pci_set_drvdata(pdev, NULL);
8384 static void __devexit
8385 bnx2_remove_one(struct pci_dev *pdev)
8387 struct net_device *dev = pci_get_drvdata(pdev);
8388 struct bnx2 *bp = netdev_priv(dev);
8390 flush_scheduled_work();
8392 unregister_netdev(dev);
8394 if (bp->mips_firmware)
8395 release_firmware(bp->mips_firmware);
8396 if (bp->rv2p_firmware)
8397 release_firmware(bp->rv2p_firmware);
8400 iounmap(bp->regview);
8402 kfree(bp->temp_stats_blk);
8405 pci_release_regions(pdev);
8406 pci_disable_device(pdev);
8407 pci_set_drvdata(pdev, NULL);
8411 bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
8413 struct net_device *dev = pci_get_drvdata(pdev);
8414 struct bnx2 *bp = netdev_priv(dev);
8416 /* PCI register 4 needs to be saved whether netif_running() or not.
8417 * MSI address and data need to be saved if using MSI and
8420 pci_save_state(pdev);
8421 if (!netif_running(dev))
8424 flush_scheduled_work();
8425 bnx2_netif_stop(bp, true);
8426 netif_device_detach(dev);
8427 del_timer_sync(&bp->timer);
8428 bnx2_shutdown_chip(bp);
8430 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
8435 bnx2_resume(struct pci_dev *pdev)
8437 struct net_device *dev = pci_get_drvdata(pdev);
8438 struct bnx2 *bp = netdev_priv(dev);
8440 pci_restore_state(pdev);
8441 if (!netif_running(dev))
8444 bnx2_set_power_state(bp, PCI_D0);
8445 netif_device_attach(dev);
8446 bnx2_init_nic(bp, 1);
8447 bnx2_netif_start(bp, true);
8452 * bnx2_io_error_detected - called when PCI error is detected
8453 * @pdev: Pointer to PCI device
8454 * @state: The current pci connection state
8456 * This function is called after a PCI bus error affecting
8457 * this device has been detected.
8459 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8460 pci_channel_state_t state)
8462 struct net_device *dev = pci_get_drvdata(pdev);
8463 struct bnx2 *bp = netdev_priv(dev);
8466 netif_device_detach(dev);
8468 if (state == pci_channel_io_perm_failure) {
8470 return PCI_ERS_RESULT_DISCONNECT;
8473 if (netif_running(dev)) {
8474 bnx2_netif_stop(bp, true);
8475 del_timer_sync(&bp->timer);
8476 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8479 pci_disable_device(pdev);
8482 /* Request a slot slot reset. */
8483 return PCI_ERS_RESULT_NEED_RESET;
8487 * bnx2_io_slot_reset - called after the pci bus has been reset.
8488 * @pdev: Pointer to PCI device
8490 * Restart the card from scratch, as if from a cold-boot.
8492 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8494 struct net_device *dev = pci_get_drvdata(pdev);
8495 struct bnx2 *bp = netdev_priv(dev);
8498 if (pci_enable_device(pdev)) {
8500 "Cannot re-enable PCI device after reset\n");
8502 return PCI_ERS_RESULT_DISCONNECT;
8504 pci_set_master(pdev);
8505 pci_restore_state(pdev);
8506 pci_save_state(pdev);
8508 if (netif_running(dev)) {
8509 bnx2_set_power_state(bp, PCI_D0);
8510 bnx2_init_nic(bp, 1);
8514 return PCI_ERS_RESULT_RECOVERED;
8518 * bnx2_io_resume - called when traffic can start flowing again.
8519 * @pdev: Pointer to PCI device
8521 * This callback is called when the error recovery driver tells us that
8522 * its OK to resume normal operation.
8524 static void bnx2_io_resume(struct pci_dev *pdev)
8526 struct net_device *dev = pci_get_drvdata(pdev);
8527 struct bnx2 *bp = netdev_priv(dev);
8530 if (netif_running(dev))
8531 bnx2_netif_start(bp, true);
8533 netif_device_attach(dev);
8537 static struct pci_error_handlers bnx2_err_handler = {
8538 .error_detected = bnx2_io_error_detected,
8539 .slot_reset = bnx2_io_slot_reset,
8540 .resume = bnx2_io_resume,
8543 static struct pci_driver bnx2_pci_driver = {
8544 .name = DRV_MODULE_NAME,
8545 .id_table = bnx2_pci_tbl,
8546 .probe = bnx2_init_one,
8547 .remove = __devexit_p(bnx2_remove_one),
8548 .suspend = bnx2_suspend,
8549 .resume = bnx2_resume,
8550 .err_handler = &bnx2_err_handler,
8553 static int __init bnx2_init(void)
8555 return pci_register_driver(&bnx2_pci_driver);
8558 static void __exit bnx2_cleanup(void)
8560 pci_unregister_driver(&bnx2_pci_driver);
8563 module_init(bnx2_init);
8564 module_exit(bnx2_cleanup);