]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2.c
net: dont update dev->trans_start
[net-next-2.6.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
a6952b52 3 * Copyright (c) 2004-2009 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
f2a4f052 38#include <linux/if_vlan.h>
08013fa3 39#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
f2a4f052
MC
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
57579f76 49#include <linux/firmware.h>
706bf240 50#include <linux/log2.h>
f2a4f052 51
b6016b76
MC
52#include "bnx2.h"
53#include "bnx2_fw.h"
b3448b0b 54
b6016b76
MC
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
581daf7e
MC
57#define DRV_MODULE_VERSION "2.0.1"
58#define DRV_MODULE_RELDATE "May 6, 2009"
57579f76
MC
59#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw"
60#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw"
61#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw"
62#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-4.6.15.fw"
b6016b76
MC
63
64#define RUN_AT(x) (jiffies + (x))
65
66/* Time in jiffies before concluding the transmitter is hung. */
67#define TX_TIMEOUT (5*HZ)
68
fefa8645 69static char version[] __devinitdata =
b6016b76
MC
70 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
71
72MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 73MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
74MODULE_LICENSE("GPL");
75MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
76MODULE_FIRMWARE(FW_MIPS_FILE_06);
77MODULE_FIRMWARE(FW_RV2P_FILE_06);
78MODULE_FIRMWARE(FW_MIPS_FILE_09);
79MODULE_FIRMWARE(FW_RV2P_FILE_09);
b6016b76
MC
80
81static int disable_msi = 0;
82
83module_param(disable_msi, int, 0);
84MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
85
86typedef enum {
87 BCM5706 = 0,
88 NC370T,
89 NC370I,
90 BCM5706S,
91 NC370F,
5b0c76ad
MC
92 BCM5708,
93 BCM5708S,
bac0dff6 94 BCM5709,
27a005b8 95 BCM5709S,
7bb0a04f 96 BCM5716,
1caacecb 97 BCM5716S,
b6016b76
MC
98} board_t;
99
100/* indexed by board_t, above */
fefa8645 101static struct {
b6016b76
MC
102 char *name;
103} board_info[] __devinitdata = {
104 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
105 { "HP NC370T Multifunction Gigabit Server Adapter" },
106 { "HP NC370i Multifunction Gigabit Server Adapter" },
107 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
108 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
109 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
110 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 111 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 112 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
7bb0a04f 113 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
1caacecb 114 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
b6016b76
MC
115 };
116
7bb0a04f 117static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
119 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
121 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
125 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
127 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
133 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
136 { PCI_VENDOR_ID_BROADCOM, 0x163b,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 138 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
140 { 0, }
141};
142
143static struct flash_spec flash_table[] =
144{
e30372c9
MC
145#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
146#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 147 /* Slow EEPROM */
37137709 148 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 149 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
150 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
151 "EEPROM - slow"},
37137709
MC
152 /* Expansion entry 0001 */
153 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 154 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
155 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
156 "Entry 0001"},
b6016b76
MC
157 /* Saifun SA25F010 (non-buffered flash) */
158 /* strap, cfg1, & write1 need updates */
37137709 159 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
161 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
162 "Non-buffered flash (128kB)"},
163 /* Saifun SA25F020 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
37137709 165 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
168 "Non-buffered flash (256kB)"},
37137709
MC
169 /* Expansion entry 0100 */
170 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 171 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
172 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
173 "Entry 0100"},
174 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 175 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 176 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
177 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
178 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
179 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
180 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 181 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
182 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
183 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
184 /* Saifun SA25F005 (non-buffered flash) */
185 /* strap, cfg1, & write1 need updates */
186 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 187 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
188 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
189 "Non-buffered flash (64kB)"},
190 /* Fast EEPROM */
191 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 192 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
193 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
194 "EEPROM - fast"},
195 /* Expansion entry 1001 */
196 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
198 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1001"},
200 /* Expansion entry 1010 */
201 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 202 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
203 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
204 "Entry 1010"},
205 /* ATMEL AT45DB011B (buffered flash) */
206 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 207 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
208 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
209 "Buffered flash (128kB)"},
210 /* Expansion entry 1100 */
211 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 212 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
213 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
214 "Entry 1100"},
215 /* Expansion entry 1101 */
216 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 217 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
218 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
219 "Entry 1101"},
220 /* Ateml Expansion entry 1110 */
221 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 222 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
223 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
224 "Entry 1110 (Atmel)"},
225 /* ATMEL AT45DB021B (buffered flash) */
226 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 227 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
228 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
229 "Buffered flash (256kB)"},
b6016b76
MC
230};
231
e30372c9
MC
232static struct flash_spec flash_5709 = {
233 .flags = BNX2_NV_BUFFERED,
234 .page_bits = BCM5709_FLASH_PAGE_BITS,
235 .page_size = BCM5709_FLASH_PAGE_SIZE,
236 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
237 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
238 .name = "5709 Buffered flash (256kB)",
239};
240
b6016b76
MC
241MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
242
/* Return the number of free TX descriptors in @txr.
 * Called from both the xmit and tx-completion paths.
 */
static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
{
	u32 diff;

	/* Make sure tx_prod/tx_cons are read after prior ring updates
	 * by the other CPU (pairs with barriers on the producer side).
	 */
	smp_mb();

	/* The ring uses 256 indices for 255 entries, one of them
	 * needs to be skipped.
	 */
	diff = txr->tx_prod - txr->tx_cons;
	if (unlikely(diff >= TX_DESC_CNT)) {
		diff &= 0xffff;
		if (diff == TX_DESC_CNT)
			diff = MAX_TX_DESC_CNT;
	}
	return (bp->tx_ring_size - diff);
}
260
b6016b76
MC
/* Read a device register indirectly through the PCI config window.
 * indirect_lock serializes the two-step (address, then data) access
 * so concurrent indirect reads/writes cannot interleave.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
272
/* Write @val to a device register indirectly through the PCI config
 * window; counterpart of bnx2_reg_rd_ind() and protected by the same
 * indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
281
2726d6e1
MC
/* Write a word into the firmware shared-memory region at @offset
 * (relative to shmem_base) via an indirect register write.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
287
/* Read a word from the firmware shared-memory region at @offset
 * (relative to shmem_base) via an indirect register read.
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
293
b6016b76
MC
/* Write @val into on-chip context memory at @cid_addr + @offset.
 * 5709 uses a data/ctrl register pair and we poll for the WRITE_REQ
 * bit to clear (up to 5 x 5us); older chips use a simple addr/data
 * pair.  indirect_lock serializes the multi-register sequence.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to consume the write request. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
317
/* Read PHY register @reg through the EMAC MDIO interface.
 * On success, stores the 16-bit value in *@val and returns 0;
 * returns -EBUSY (and *@val = 0) if the transaction never completes
 * within the 50 x 10us polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	/* Hardware auto-polling owns the MDIO bus; turn it off for the
	 * duration of the manual transaction (read back to flush the
	 * posted write, then settle).
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Start the read: phy address, register, command + busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the returned data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	/* Restore hardware auto-polling if we disabled it above. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
374
/* Write @val to PHY register @reg through the EMAC MDIO interface.
 * Returns 0 on success or -EBUSY if the transaction does not complete
 * within the 50 x 10us polling window.  Mirrors bnx2_read_phy(),
 * including the auto-poll disable/re-enable bracketing.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	/* Take the MDIO bus away from hardware auto-polling first. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	/* Poll for completion (busy bit self-clears). */
	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	/* Give the bus back to hardware auto-polling. */
	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
423
/* Mask interrupts on every active vector.  The final read flushes
 * the posted writes so masking takes effect before we return.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
437
/* Unmask interrupts on every active vector, acking up to the last
 * seen status index.  The first write acks while still masked, the
 * second unmasks; finally COAL_NOW kicks the host coalescing block
 * so a pending event generates an interrupt immediately.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
458
/* Disable interrupts and wait for all in-flight handlers to finish.
 * intr_sem is bumped first so a racing handler sees interrupts as
 * logically disabled; bnx2_netif_start() decrements it again.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	bnx2_disable_int(bp);
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
469
35efa7c1
MC
470static void
471bnx2_napi_disable(struct bnx2 *bp)
472{
b4b36042
MC
473 int i;
474
475 for (i = 0; i < bp->irq_nvecs; i++)
476 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
477}
478
479static void
480bnx2_napi_enable(struct bnx2 *bp)
481{
b4b36042
MC
482 int i;
483
484 for (i = 0; i < bp->irq_nvecs; i++)
485 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
486}
487
b6016b76
MC
/* Quiesce the interface: sync-disable interrupts, stop NAPI and the
 * TX queues.  Paired with bnx2_netif_start().
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		/* Touch trans_start so the watchdog does not fire while
		 * TX is deliberately stopped.  NOTE(review): upstream
		 * later removed direct trans_start updates from drivers
		 * ("net: dont update dev->trans_start") - confirm
		 * against the target kernel version.
		 */
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
498
/* Undo bnx2_netif_stop(): when the last outstanding stop is released
 * (intr_sem drops to 0), wake the TX queues and re-enable NAPI and
 * interrupts.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
		}
	}
}
510
35e9010b
MC
/* Free the TX descriptor rings (DMA) and their SW shadow rings.
 * Safe on partial allocations: NULL descriptor rings are skipped and
 * kfree(NULL) is a no-op.
 */
static void
bnx2_free_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		if (txr->tx_desc_ring) {
			pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
					    txr->tx_desc_ring,
					    txr->tx_desc_mapping);
			txr->tx_desc_ring = NULL;
		}
		kfree(txr->tx_buf_ring);
		txr->tx_buf_ring = NULL;
	}
}
530
bb4f98ab
MC
531static void
532bnx2_free_rx_mem(struct bnx2 *bp)
533{
534 int i;
535
536 for (i = 0; i < bp->num_rx_rings; i++) {
537 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
538 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
539 int j;
540
541 for (j = 0; j < bp->rx_max_ring; j++) {
542 if (rxr->rx_desc_ring[j])
543 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
544 rxr->rx_desc_ring[j],
545 rxr->rx_desc_mapping[j]);
546 rxr->rx_desc_ring[j] = NULL;
547 }
548 if (rxr->rx_buf_ring)
549 vfree(rxr->rx_buf_ring);
550 rxr->rx_buf_ring = NULL;
551
552 for (j = 0; j < bp->rx_max_pg_ring; j++) {
553 if (rxr->rx_pg_desc_ring[j])
554 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
3298a738
MC
555 rxr->rx_pg_desc_ring[j],
556 rxr->rx_pg_desc_mapping[j]);
557 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab
MC
558 }
559 if (rxr->rx_pg_ring)
560 vfree(rxr->rx_pg_ring);
561 rxr->rx_pg_ring = NULL;
562 }
563}
564
35e9010b
MC
/* Allocate the TX SW shadow rings and DMA descriptor rings.
 * Returns 0 or -ENOMEM.  On failure the caller (bnx2_alloc_mem) is
 * responsible for cleanup via bnx2_free_mem().
 */
static int
bnx2_alloc_tx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;

		txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
		if (txr->tx_buf_ring == NULL)
			return -ENOMEM;

		txr->tx_desc_ring =
			pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
					     &txr->tx_desc_mapping);
		if (txr->tx_desc_ring == NULL)
			return -ENOMEM;
	}
	return 0;
}
586
bb4f98ab
MC
/* Allocate the RX SW shadow rings (vmalloc, zeroed by hand), the DMA
 * descriptor rings, and - when jumbo page rings are configured - the
 * page shadow/descriptor rings.  Returns 0 or -ENOMEM; on failure the
 * caller (bnx2_alloc_mem) cleans up via bnx2_free_mem().
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		/* Page ring only exists when jumbo/split-page RX is on. */
		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
635
b6016b76
MC
/* Free everything bnx2_alloc_mem() allocated: TX/RX rings, 5709
 * context pages, and the combined status + statistics block.  Also
 * used as the error-path cleanup, so every branch tolerates NULLs.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	/* Status and stats blocks share one allocation (see
	 * bnx2_alloc_mem), so a single free releases both.
	 */
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
661
/* Allocate all host memory the device needs: the combined status +
 * statistics block, per-vector MSI-X status sub-blocks, 5709 context
 * pages, and the RX/TX rings.  Returns 0 or -ENOMEM; on any failure
 * everything already allocated is released via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base (MSI-style) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Remaining vectors get aligned sub-blocks carved out of
		 * the same allocation; int_num tags the HW vector.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block lives right after the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
738
e3648b3d
MC
/* Report the current link state (speed/duplex/autoneg) to the
 * bootcode through the shared-memory LINK_STATUS word.  Skipped when
 * a remote PHY manages the link.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice - presumably because some
			 * of its bits are latched, so the second read
			 * reflects current state (standard MII
			 * behavior) - confirm against the PHY datasheet.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
797
9b1084b8
MC
798static char *
799bnx2_xceiver_str(struct bnx2 *bp)
800{
801 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 802 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
803 "Copper"));
804}
805
b6016b76
MC
/* Log the link state and toggle the netdev carrier accordingly, then
 * propagate the state to the bootcode via bnx2_report_fw_link().
 * The message is built from several consecutive printk() calls, so
 * their order must not change.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
842
/* Resolve the negotiated pause (flow-control) configuration into
 * bp->flow_ctrl.  If speed/flow autoneg is not fully enabled, the
 * requested setting is used verbatim (full duplex only).  5708 SerDes
 * reports the resolved state directly; otherwise the local/partner
 * advertisements are combined per IEEE 802.3 pause resolution.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	/* Pause is only defined for full duplex links. */
	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes: the PHY already resolved pause; read it back. */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Map 1000BASE-X pause bits onto the copper bit positions so
	 * the resolution logic below works for both media types.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
918
27a005b8
MC
/* Record link-up parameters for the 5709 SerDes PHY.  Reads the
 * GP_STATUS block (switching the PHY block address around the read),
 * then derives line_speed/duplex; forced-speed configs use the
 * requested values instead.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
957
/* Record link-up parameters for the 5708 SerDes PHY: the 1000X_STAT1
 * register reports the resolved speed and duplex directly.  Always
 * returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
	case BCM5708S_1000X_STAT1_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case BCM5708S_1000X_STAT1_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case BCM5708S_1000X_STAT1_SPEED_1G:
		bp->line_speed = SPEED_1000;
		break;
	case BCM5708S_1000X_STAT1_SPEED_2G5:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
986
/* Record link-up parameters for the 5706 SerDes PHY.  Speed is fixed
 * at 1000 Mbps; duplex comes from BMCR, refined by the common subset
 * of local/partner 1000BASE-X advertisements when autoneg is on.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR alone decides, nothing more to resolve. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1023
/* Derive line_speed/duplex for a copper PHY.  With autoneg enabled,
 * the gigabit registers (CTRL1000/STAT1000) are checked first, then
 * the 10/100 advertisement intersection; no common ability means the
 * link is considered down.  Forced mode decodes BMCR directly.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 partner bits sit 2 positions above the
		 * CTRL1000 advertisement bits, hence the shift.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1089
/* Program the L2 context for one rx ring; on 5709 this includes the
 * flow-control watermarks derived from the rx ring size.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;	/* NOTE(review): undocumented field value - confirm */

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark only matters when tx pause is enabled. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* hi_water is a 4-bit field; 0 disables, so disable lo too. */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1125
bb4f98ab
MC
1126static void
1127bnx2_init_all_rx_contexts(struct bnx2 *bp)
1128{
1129 int i;
1130 u32 cid;
1131
1132 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1133 if (i == 1)
1134 cid = RX_RSS_CID;
1135 bnx2_init_rx_context(bp, cid);
1136 }
1137}
1138
/* Sync the EMAC with the resolved link state: port mode, duplex,
 * rx/tx pause, and (on 5709) the rx context watermarks which depend
 * on the negotiated flow control.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	/* Default slot time/IPG; 1G half duplex needs a larger value. */
	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
		case SPEED_10:
			if (CHIP_NUM(bp) != CHIP_NUM_5706) {
				val |= BNX2_EMAC_MODE_PORT_MII_10M;
				break;
			}
			/* 5706 has no 10M MII mode; fall through to MII. */
			/* fall through */
		case SPEED_100:
			val |= BNX2_EMAC_MODE_PORT_MII;
			break;
		case SPEED_2500:
			val |= BNX2_EMAC_MODE_25G_MODE;
			/* 2.5G also requires GMII; fall through. */
			/* fall through */
		case SPEED_1000:
			val |= BNX2_EMAC_MODE_PORT_GMII;
			break;
		}
	}
	else {
		/* No link: park the port in GMII mode. */
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 rx watermarks depend on flow_ctrl; reprogram contexts. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1206
27a005b8
MC
1207static void
1208bnx2_enable_bmsr1(struct bnx2 *bp)
1209{
583c28e5 1210 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1211 (CHIP_NUM(bp) == CHIP_NUM_5709))
1212 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1213 MII_BNX2_BLK_ADDR_GP_STATUS);
1214}
1215
1216static void
1217bnx2_disable_bmsr1(struct bnx2 *bp)
1218{
583c28e5 1219 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1220 (CHIP_NUM(bp) == CHIP_NUM_5709))
1221 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1222 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1223}
1224
605a9e20
MC
1225static int
1226bnx2_test_and_enable_2g5(struct bnx2 *bp)
1227{
1228 u32 up1;
1229 int ret = 1;
1230
583c28e5 1231 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1232 return 0;
1233
1234 if (bp->autoneg & AUTONEG_SPEED)
1235 bp->advertising |= ADVERTISED_2500baseX_Full;
1236
27a005b8
MC
1237 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1238 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1239
605a9e20
MC
1240 bnx2_read_phy(bp, bp->mii_up1, &up1);
1241 if (!(up1 & BCM5708S_UP1_2G5)) {
1242 up1 |= BCM5708S_UP1_2G5;
1243 bnx2_write_phy(bp, bp->mii_up1, up1);
1244 ret = 0;
1245 }
1246
27a005b8
MC
1247 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1248 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1249 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1250
605a9e20
MC
1251 return ret;
1252}
1253
1254static int
1255bnx2_test_and_disable_2g5(struct bnx2 *bp)
1256{
1257 u32 up1;
1258 int ret = 0;
1259
583c28e5 1260 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1261 return 0;
1262
27a005b8
MC
1263 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1264 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1265
605a9e20
MC
1266 bnx2_read_phy(bp, bp->mii_up1, &up1);
1267 if (up1 & BCM5708S_UP1_2G5) {
1268 up1 &= ~BCM5708S_UP1_2G5;
1269 bnx2_write_phy(bp, bp->mii_up1, up1);
1270 ret = 1;
1271 }
1272
27a005b8
MC
1273 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1274 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1275 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1276
605a9e20
MC
1277 return ret;
1278}
1279
1280static void
1281bnx2_enable_forced_2g5(struct bnx2 *bp)
1282{
1283 u32 bmcr;
1284
583c28e5 1285 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1286 return;
1287
27a005b8
MC
1288 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1289 u32 val;
1290
1291 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1292 MII_BNX2_BLK_ADDR_SERDES_DIG);
1293 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1294 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1295 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1296 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1297
1298 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1299 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1300 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1301
1302 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1303 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1304 bmcr |= BCM5708S_BMCR_FORCE_2500;
1305 }
1306
1307 if (bp->autoneg & AUTONEG_SPEED) {
1308 bmcr &= ~BMCR_ANENABLE;
1309 if (bp->req_duplex == DUPLEX_FULL)
1310 bmcr |= BMCR_FULLDPLX;
1311 }
1312 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1313}
1314
1315static void
1316bnx2_disable_forced_2g5(struct bnx2 *bp)
1317{
1318 u32 bmcr;
1319
583c28e5 1320 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1321 return;
1322
27a005b8
MC
1323 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1324 u32 val;
1325
1326 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1327 MII_BNX2_BLK_ADDR_SERDES_DIG);
1328 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1329 val &= ~MII_BNX2_SD_MISC1_FORCE;
1330 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1331
1332 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1333 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1334 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1335
1336 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1337 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1338 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1339 }
1340
1341 if (bp->autoneg & AUTONEG_SPEED)
1342 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1343 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1344}
1345
/* Force the 5706 SerDes link down or release it, via the expansion
 * SERDES_CTL register accessed through the DSP address/data ports.
 * NOTE(review): start != 0 clears bits (val & 0xff0f), start == 0
 * sets 0xc0 - confirm bit polarity against 5706 SerDes documentation.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1358
/* Poll the PHY, update the driver's link state (speed/duplex/flow
 * control), and program the MAC to match.  Caller holds bp->phy_lock.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback modes the link is always considered up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Link is managed by the firmware on remote-PHY devices. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* Read BMSR twice: link status is latched-low, so the first
	 * read may report a stale link-down event.
	 */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link status from the EMAC
	 * status and the autoneg debug shadow register instead of
	 * trusting BMSR alone.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register reads are also latched; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G state and restore
		 * autoneg if parallel detection had disabled it.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	/* Only log when the link state actually changed. */
	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1442
1443static int
1444bnx2_reset_phy(struct bnx2 *bp)
1445{
1446 int i;
1447 u32 reg;
1448
ca58c3af 1449 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1450
1451#define PHY_RESET_MAX_WAIT 100
1452 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1453 udelay(10);
1454
ca58c3af 1455 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1456 if (!(reg & BMCR_RESET)) {
1457 udelay(20);
1458 break;
1459 }
1460 }
1461 if (i == PHY_RESET_MAX_WAIT) {
1462 return -EBUSY;
1463 }
1464 return 0;
1465}
1466
1467static u32
1468bnx2_phy_get_pause_adv(struct bnx2 *bp)
1469{
1470 u32 adv = 0;
1471
1472 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1473 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1474
583c28e5 1475 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1476 adv = ADVERTISE_1000XPAUSE;
1477 }
1478 else {
1479 adv = ADVERTISE_PAUSE_CAP;
1480 }
1481 }
1482 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1483 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1484 adv = ADVERTISE_1000XPSE_ASYM;
1485 }
1486 else {
1487 adv = ADVERTISE_PAUSE_ASYM;
1488 }
1489 }
1490 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1491 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1492 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1493 }
1494 else {
1495 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1496 }
1497 }
1498 return adv;
1499}
1500
/* Forward declaration: defined later in this file. */
static int bnx2_fw_sync(struct bnx2 *, u32, int, int);

/* Ask the management firmware to configure the link (remote-PHY
 * capable devices).  Encodes speed/duplex/pause settings into the
 * SET_LINK mailbox argument.  Drops phy_lock around the firmware
 * handshake, which can sleep.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every speed the user enabled. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced mode: exactly one speed/duplex combination. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* The firmware sync may sleep; don't hold the phy_lock. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1561
/* Configure a SerDes PHY according to bp's requested settings, either
 * forced speed/duplex or autonegotiation.  Delegates to the firmware
 * on remote-PHY devices.  Caller holds bp->phy_lock; it is dropped
 * and reacquired around sleeps.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Changing 2.5G advertisement requires a link bounce. */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* NOTE(review): 0x2000 clears an extra BMCR
				 * speed-select bit on 5709 - confirm.
				 */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1678
/* Advertisement masks used by the ethtool/setup paths below.  The
 * ETHTOOL_* macros expand to ADVERTISED_* (ethtool) bits; the PHY_*
 * macros expand to ADVERTISE_* (MII register) bits.  Note that
 * ETHTOOL_ALL_FIBRE_SPEED references a local `bp` and must only be
 * used where one is in scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1693
/* Derive the default link settings from the shared-memory
 * configuration published by the management firmware (remote-PHY
 * devices), for whichever port type is currently active.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		/* Autoneg: translate firmware speed bits into the
		 * ethtool advertising mask.
		 */
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode: highest configured speed wins. */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1740
deaf391b
MC
1741static void
1742bnx2_set_default_link(struct bnx2 *bp)
1743{
ab59859d
HH
1744 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1745 bnx2_set_default_remote_link(bp);
1746 return;
1747 }
0d8a6571 1748
deaf391b
MC
1749 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1750 bp->req_line_speed = 0;
583c28e5 1751 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
deaf391b
MC
1752 u32 reg;
1753
1754 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1755
2726d6e1 1756 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
deaf391b
MC
1757 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1758 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1759 bp->autoneg = 0;
1760 bp->req_line_speed = bp->line_speed = SPEED_1000;
1761 bp->req_duplex = DUPLEX_FULL;
1762 }
1763 } else
1764 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1765}
1766
/* Bump the driver pulse sequence number in shared memory so the
 * bootcode knows the driver is still alive.  Writes go through the
 * PCICFG register window under indirect_lock.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1780
/* Handle a link-status event from the management firmware: decode the
 * LINK_STATUS word from shared memory into bp's link state and program
 * the MAC accordingly.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each HALF case sets the duplex and falls through to the
		 * matching FULL case to pick up the line speed.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control: forced when autoneg is not fully on,
		 * otherwise taken from the firmware-reported result.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware may switch between SerDes and copper. */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1857
1858static int
1859bnx2_set_remote_link(struct bnx2 *bp)
1860{
1861 u32 evt_code;
1862
2726d6e1 1863 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
1864 switch (evt_code) {
1865 case BNX2_FW_EVT_CODE_LINK_EVENT:
1866 bnx2_remote_phy_event(bp);
1867 break;
1868 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1869 default:
df149d70 1870 bnx2_send_heart_beat(bp);
0d8a6571
MC
1871 break;
1872 }
1873 return 0;
1874}
1875
/* Configure a copper PHY according to bp's requested settings, either
 * autonegotiation (reprogram advertisement registers and restart AN)
 * or forced speed/duplex.  Caller holds bp->phy_lock; it is dropped
 * and reacquired around the forced-link-down sleep.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current 10/100 and 1000 advertisements, masked to the
		 * bits this function manages.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if something actually changed. */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* Read BMSR twice: link status is latched-low. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1974
1975static int
0d8a6571 1976bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1977__releases(&bp->phy_lock)
1978__acquires(&bp->phy_lock)
b6016b76
MC
1979{
1980 if (bp->loopback == MAC_LOOPBACK)
1981 return 0;
1982
583c28e5 1983 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 1984 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1985 }
1986 else {
1987 return (bnx2_setup_copper_phy(bp));
1988 }
1989}
1990
/* One-time init of the 5709 SerDes PHY: select the AN MMD, optionally
 * reset, force fiber mode, set 2.5G capability, and enable BAM/CL73
 * next-page autoneg features.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* The 5709 SerDes IEEE registers are offset by 0x10. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Route register accesses to the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode instead of auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM and teton2 next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Restore the default register block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2040
/* One-time init of the 5708 SerDes PHY: fiber/auto-detect mode, 2.5G
 * capability, and board/stepping-specific tx amplitude adjustments
 * taken from NVRAM.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a larger tx signal amplitude. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the NVRAM tx control override on backplane designs. */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2098
/* Initialize the 5706 SerDes PHY.
 *
 * Clears the parallel-detect flag and programs extended packet length
 * when jumbo frames are in use (mtu > 1500), or restores the standard
 * settings otherwise.  Registers 0x18 and 0x1c are vendor shadow
 * registers; the magic constants are presumably from Broadcom PHY
 * documentation -- do not change without hardware reference.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
        if (reset_phy)
                bnx2_reset_phy(bp);

        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

        if (CHIP_NUM(bp) == CHIP_NUM_5706)
                REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

        if (bp->dev->mtu > 1500) {
                u32 val;

                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
        }
        else {
                u32 val;

                /* Clear the extended packet length bit for standard MTU. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_write_phy(bp, 0x1c, 0x6c00);
                bnx2_read_phy(bp, 0x1c, &val);
                bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
        }

        return 0;
}
2136
/* Initialize a copper (twisted-pair) PHY.
 *
 * Applies errata workarounds gated by phy_flags (CRC fix sequence,
 * early-DAC disable), configures extended packet length for jumbo MTU,
 * and enables ethernet@wirespeed (link at reduced speed over marginal
 * cabling).  The raw 0x15/0x17/0x18 accesses are vendor expansion/shadow
 * registers; constants are board/PHY specific -- TODO confirm against
 * Broadcom PHY documentation before modifying.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
        u32 val;

        if (reset_phy)
                bnx2_reset_phy(bp);

        /* Errata workaround sequence, enabled per-board via phy_flags. */
        if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
                bnx2_write_phy(bp, 0x18, 0x0c00);
                bnx2_write_phy(bp, 0x17, 0x000a);
                bnx2_write_phy(bp, 0x15, 0x310b);
                bnx2_write_phy(bp, 0x17, 0x201f);
                bnx2_write_phy(bp, 0x15, 0x9506);
                bnx2_write_phy(bp, 0x17, 0x401f);
                bnx2_write_phy(bp, 0x15, 0x14e2);
                bnx2_write_phy(bp, 0x18, 0x0400);
        }

        /* Clear bit 8 of DSP expansion register 8 to disable early DAC. */
        if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
                bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
                               MII_BNX2_DSP_EXPAND_REG | 0x8);
                bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
                val &= ~(1 << 8);
                bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
        }

        if (bp->dev->mtu > 1500) {
                /* Set extended packet length bit */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val | 0x4000);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val | 0x1);
        }
        else {
                /* Standard MTU: clear the extended packet length bits. */
                bnx2_write_phy(bp, 0x18, 0x7);
                bnx2_read_phy(bp, 0x18, &val);
                bnx2_write_phy(bp, 0x18, val & ~0x4007);

                bnx2_read_phy(bp, 0x10, &val);
                bnx2_write_phy(bp, 0x10, val & ~0x1);
        }

        /* ethernet@wirespeed */
        bnx2_write_phy(bp, 0x18, 0x7007);
        bnx2_read_phy(bp, 0x18, &val);
        bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
        return 0;
}
2188
2189
/* Top-level PHY initialization, dispatching to the chip-specific
 * SerDes or copper init routine and then running link setup.
 *
 * Must be called with bp->phy_lock held; the sparse annotations record
 * that bnx2_setup_phy() may drop and re-acquire the lock internally.
 * Returns 0 on success or the error from the chip-specific init /
 * bnx2_setup_phy().
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
        u32 val;
        int rc = 0;

        /* Use link-ready interrupt mode for PHY events. */
        bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
        bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

        /* Default MII register map; chip-specific init may override
         * (e.g. 5708 replaces mii_up1).
         */
        bp->mii_bmcr = MII_BMCR;
        bp->mii_bmsr = MII_BMSR;
        bp->mii_bmsr1 = MII_BMSR;
        bp->mii_adv = MII_ADVERTISE;
        bp->mii_lpa = MII_LPA;

        REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

        /* Remote-PHY setups are managed by firmware; skip local PHY init. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                goto setup_phy;

        /* Assemble the 32-bit PHY id from the two MII id registers. */
        bnx2_read_phy(bp, MII_PHYSID1, &val);
        bp->phy_id = val << 16;
        bnx2_read_phy(bp, MII_PHYSID2, &val);
        bp->phy_id |= val & 0xffff;

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        rc = bnx2_init_5706s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                        rc = bnx2_init_5708s_phy(bp, reset_phy);
                else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                        rc = bnx2_init_5709s_phy(bp, reset_phy);
        }
        else {
                rc = bnx2_init_copper_phy(bp, reset_phy);
        }

setup_phy:
        if (!rc)
                rc = bnx2_setup_phy(bp, bp->phy_port);

        return rc;
}
2235
2236static int
2237bnx2_set_mac_loopback(struct bnx2 *bp)
2238{
2239 u32 mac_mode;
2240
2241 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2242 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2243 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2244 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2245 bp->link_up = 1;
2246 return 0;
2247}
2248
bc5a0690
MC
2249static int bnx2_test_link(struct bnx2 *);
2250
/* Put the PHY into loopback mode for self-test.
 *
 * Forces 1000/full loopback via BMCR under phy_lock, waits up to ~1s
 * (10 x 100ms) for the link to come up, then configures the EMAC for
 * GMII and forces link up.  Returns 0, or the bnx2_write_phy() error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
        u32 mac_mode;
        int rc, i;

        spin_lock_bh(&bp->phy_lock);
        rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
                            BMCR_SPEED1000);
        spin_unlock_bh(&bp->phy_lock);
        if (rc)
                return rc;

        /* Poll for link; falls through after 10 tries regardless. */
        for (i = 0; i < 10; i++) {
                if (bnx2_test_link(bp) == 0)
                        break;
                msleep(100);
        }

        /* Clear any previous port/loopback/duplex mode bits, then select
         * GMII with link forced up.
         */
        mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
        mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
                      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
                      BNX2_EMAC_MODE_25G_MODE);

        mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
        REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
        bp->link_up = 1;
        return 0;
}
2280
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and (optionally) wait for its acknowledgement.
 *
 * @msg_data: message code/data; a new sequence number is OR'ed in.
 * @ack:      if zero, fire-and-forget (always returns 0).
 * @silent:   suppress the timeout printk.
 *
 * Returns 0 on success, -EBUSY if the firmware never acked (and informs
 * the firmware of the timeout), -EIO if the firmware reported a
 * non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
        int i;
        u32 val;

        /* The sequence number lets the firmware match ack to request. */
        bp->fw_wr_seq++;
        msg_data |= bp->fw_wr_seq;

        bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

        if (!ack)
                return 0;

        /* wait for an acknowledgement. */
        for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
                msleep(10);

                val = bnx2_shmem_rd(bp, BNX2_FW_MB);

                if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
                        break;
        }
        /* WAIT0 messages do not require a firmware response. */
        if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
                return 0;

        /* If we timed out, inform the firmware that this is the case. */
        if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
                if (!silent)
                        printk(KERN_ERR PFX "fw sync timeout, reset code = "
                               "%x\n", msg_data);

                msg_data &= ~BNX2_DRV_MSG_CODE;
                msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

                bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

                return -EBUSY;
        }

        if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
                return -EIO;

        return 0;
}
2326
/* Initialize the 5709 host-based context memory.
 *
 * Kicks off the hardware context memory init, waits for it to finish,
 * then registers each pre-allocated, zeroed context page in the host
 * page table, polling each write for completion.
 *
 * Returns 0 on success, -EBUSY if the hardware did not complete an
 * operation in time, -ENOMEM if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
        int i, ret = 0;
        u32 val;

        val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
        val |= (BCM_PAGE_BITS - 8) << 16;
        REG_WR(bp, BNX2_CTX_COMMAND, val);

        /* Wait up to 10 * 2us for the context memory init to complete. */
        for (i = 0; i < 10; i++) {
                val = REG_RD(bp, BNX2_CTX_COMMAND);
                if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
                        break;
                udelay(2);
        }
        if (val & BNX2_CTX_COMMAND_MEM_INIT)
                return -EBUSY;

        for (i = 0; i < bp->ctx_pages; i++) {
                int j;

                if (bp->ctx_blk[i])
                        memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
                else
                        return -ENOMEM;

                /* Program the page's DMA address (low word with the valid
                 * bit, then high word) and request the table write.
                 */
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
                       (bp->ctx_blk_mapping[i] & 0xffffffff) |
                       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
                       (u64) bp->ctx_blk_mapping[i] >> 32);
                REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
                       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
                for (j = 0; j < 10; j++) {

                        val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
                        if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
                                break;
                        udelay(5);
                }
                if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
                        ret = -EBUSY;
                        break;
                }
        }
        return ret;
}
2374
b6016b76
MC
/* Zero out on-chip context memory for all 96 connection IDs
 * (pre-5709 chips; the 5709 uses host-based context instead).
 *
 * On 5706 A0 silicon some CIDs must be remapped to alternate physical
 * CIDs (errata workaround for vcids with bit 3 set).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
        u32 vcid;

        vcid = 96;
        while (vcid) {
                u32 vcid_addr, pcid_addr, offset;
                int i;

                vcid--;

                if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
                        u32 new_vcid;

                        vcid_addr = GET_PCID_ADDR(vcid);
                        /* A0 errata: remap CIDs with bit 3 set into the
                         * 0x60+ range.
                         */
                        if (vcid & 0x8) {
                                new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
                        }
                        else {
                                new_vcid = vcid;
                        }
                        pcid_addr = GET_PCID_ADDR(new_vcid);
                }
                else {
                        vcid_addr = GET_CID_ADDR(vcid);
                        pcid_addr = vcid_addr;
                }

                /* Each CID context spans several physical context pages. */
                for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
                        vcid_addr += (i << PHY_CTX_SHIFT);
                        pcid_addr += (i << PHY_CTX_SHIFT);

                        REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
                        REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

                        /* Zero out the context. */
                        for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
                                bnx2_ctx_wr(bp, vcid_addr, offset, 0);
                }
        }
}
2417
/* Work around bad on-chip RX buffer memory (5706 A0 errata -- TODO
 * confirm which chips need this; the caller gates it).
 *
 * Allocates every free RX mbuf from the chip, records the good ones
 * (bit 9 clear in the returned address), then frees only the good ones
 * back -- permanently leaking the bad blocks inside the chip so the
 * hardware never hands them out again.
 *
 * Returns 0 on success or -ENOMEM if the temporary array cannot be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
        u16 *good_mbuf;
        u32 good_mbuf_cnt;
        u32 val;

        /* Up to 512 mbufs can be tracked. */
        good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
        if (good_mbuf == NULL) {
                printk(KERN_ERR PFX "Failed to allocate memory in "
                                    "bnx2_alloc_bad_rbuf\n");
                return -ENOMEM;
        }

        REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
                BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

        good_mbuf_cnt = 0;

        /* Allocate a bunch of mbufs and save the good ones in an array. */
        val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
                bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
                                BNX2_RBUF_COMMAND_ALLOC_REQ);

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

                val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

                /* The addresses with Bit 9 set are bad memory blocks. */
                if (!(val & (1 << 9))) {
                        good_mbuf[good_mbuf_cnt] = (u16) val;
                        good_mbuf_cnt++;
                }

                val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
        }

        /* Free the good ones back to the mbuf pool thus discarding
         * all the bad ones. */
        while (good_mbuf_cnt) {
                good_mbuf_cnt--;

                val = good_mbuf[good_mbuf_cnt];
                val = (val << 9) | val | 1;

                bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
        }
        kfree(good_mbuf);
        return 0;
}
2469
2470static void
5fcaed01 2471bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2472{
2473 u32 val;
b6016b76
MC
2474
2475 val = (mac_addr[0] << 8) | mac_addr[1];
2476
5fcaed01 2477 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2478
6aa20a22 2479 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2480 (mac_addr[4] << 8) | mac_addr[5];
2481
5fcaed01 2482 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2483}
2484
/* Allocate and DMA-map one page for the RX page ring at @index, and
 * write its bus address into the corresponding page buffer descriptor.
 *
 * Returns 0 on success, -ENOMEM if the page allocation fails, -EIO if
 * the DMA mapping fails (page is freed before returning).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        dma_addr_t mapping;
        struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
        struct rx_bd *rxbd =
                &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
        struct page *page = alloc_page(GFP_ATOMIC);

        if (!page)
                return -ENOMEM;
        mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
                               PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                __free_page(page);
                return -EIO;
        }

        /* Record the page and its mapping for later unmap/free, then
         * publish the 64-bit bus address to the hardware descriptor.
         */
        rx_pg->page = page;
        pci_unmap_addr_set(rx_pg, mapping, mapping);
        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        return 0;
}
2509
2510static void
bb4f98ab 2511bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2512{
bb4f98ab 2513 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2514 struct page *page = rx_pg->page;
2515
2516 if (!page)
2517 return;
2518
2519 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2520 PCI_DMA_FROMDEVICE);
2521
2522 __free_page(page);
2523 rx_pg->page = NULL;
2524}
2525
/* Allocate and DMA-map a new RX skb for ring slot @index, publish its
 * bus address to the hardware descriptor, and advance the producer
 * byte-sequence counter.
 *
 * Returns 0 on success, -ENOMEM if skb allocation fails, -EIO if the
 * DMA mapping fails (skb is freed before returning).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
        struct sk_buff *skb;
        struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
        dma_addr_t mapping;
        struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
        unsigned long align;

        skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
        if (skb == NULL) {
                return -ENOMEM;
        }

        /* Align skb->data to BNX2_RX_ALIGN for the hardware. */
        if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
                skb_reserve(skb, BNX2_RX_ALIGN - align);

        mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);
        if (pci_dma_mapping_error(bp->pdev, mapping)) {
                dev_kfree_skb(skb);
                return -EIO;
        }

        rx_buf->skb = skb;
        pci_unmap_addr_set(rx_buf, mapping, mapping);

        rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
        rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

        /* Hardware tracks posted buffer space as a running byte count. */
        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        return 0;
}
2560
/* Check whether attention @event has toggled in the status block, and
 * acknowledge it to the hardware if so.
 *
 * The event is pending when the attention bit differs from its ack bit;
 * writing the SET/CLEAR command registers brings the ack state back in
 * line.  Returns 1 if the event was pending (and was acked), 0 if not.
 */
static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
        struct status_block *sblk = bnapi->status_blk.msi;
        u32 new_link_state, old_link_state;
        int is_set = 1;

        new_link_state = sblk->status_attn_bits & event;
        old_link_state = sblk->status_attn_bits_ack & event;
        if (new_link_state != old_link_state) {
                /* Ack by setting or clearing the bit to match the new state. */
                if (new_link_state)
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
                else
                        REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
        } else
                is_set = 0;

        return is_set;
}
2580
/* Handle PHY-related attention events from NAPI context: link-state
 * changes and (for remote-PHY setups) firmware timer-abort events.
 * Takes phy_lock because the link-setup paths read/write PHY state.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
        spin_lock(&bp->phy_lock);

        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
                bnx2_set_link(bp);
        if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
                bnx2_set_remote_link(bp);

        spin_unlock(&bp->phy_lock);

}
2594
/* Read the hardware TX consumer index from the status block.
 *
 * The barriers prevent the compiler from caching the DMA-updated status
 * block field.  The hardware skips the last descriptor of each ring
 * page (it holds the next-page pointer), so step over that index.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_tx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
                cons++;
        return cons;
}
2608
/* Reclaim completed TX descriptors for @bnapi's ring, up to @budget
 * packets.  Unmaps and frees each completed skb, then wakes the
 * corresponding netdev TX queue if it was stopped and enough
 * descriptors have become free.  Returns the number of packets
 * reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
        struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
        u16 hw_cons, sw_cons, sw_ring_cons;
        int tx_pkt = 0, index;
        struct netdev_queue *txq;

        /* One TX queue per NAPI instance. */
        index = (bnapi - bp->bnx2_napi);
        txq = netdev_get_tx_queue(bp->dev, index);

        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        sw_cons = txr->tx_cons;

        while (sw_cons != hw_cons) {
                struct sw_tx_bd *tx_buf;
                struct sk_buff *skb;
                int i, last;

                sw_ring_cons = TX_RING_IDX(sw_cons);

                tx_buf = &txr->tx_buf_ring[sw_ring_cons];
                skb = tx_buf->skb;

                /* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
                prefetch(&skb->end);

                /* partial BD completions possible with TSO packets */
                if (tx_buf->is_gso) {
                        u16 last_idx, last_ring_idx;

                        last_idx = sw_cons + tx_buf->nr_frags + 1;
                        last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
                        /* Account for the skipped next-page descriptor. */
                        if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
                                last_idx++;
                        }
                        /* Stop if the last BD of this packet has not
                         * completed yet (signed wrap-safe compare).
                         */
                        if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
                                break;
                        }
                }

                skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

                tx_buf->skb = NULL;
                last = tx_buf->nr_frags;

                for (i = 0; i < last; i++) {
                        sw_cons = NEXT_TX_BD(sw_cons);
                }

                sw_cons = NEXT_TX_BD(sw_cons);

                dev_kfree_skb(skb);
                tx_pkt++;
                if (tx_pkt == budget)
                        break;

                /* Re-read the hardware index if we caught up. */
                if (hw_cons == sw_cons)
                        hw_cons = bnx2_get_hw_tx_cons(bnapi);
        }

        txr->hw_tx_cons = hw_cons;
        txr->tx_cons = sw_cons;

        /* Need to make the tx_cons update visible to bnx2_start_xmit()
         * before checking for netif_tx_queue_stopped().  Without the
         * memory barrier, there is a small possibility that bnx2_start_xmit()
         * will miss it and cause the queue to be stopped forever.
         */
        smp_mb();

        /* Wake the queue under the tx lock; re-check both conditions to
         * close the race with a concurrent bnx2_start_xmit().
         */
        if (unlikely(netif_tx_queue_stopped(txq)) &&
                     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
                __netif_tx_lock(txq, smp_processor_id());
                if ((netif_tx_queue_stopped(txq)) &&
                    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
                        netif_tx_wake_queue(txq);
                __netif_tx_unlock(txq);
        }

        return tx_pkt;
}
2691
/* Recycle @count page-ring entries from the consumer back to the
 * producer side of the RX page ring (used when a replacement page could
 * not be allocated).
 *
 * If @skb is non-NULL, its last page fragment is first reclaimed into
 * the consumer slot and the skb is freed -- the caller failed to
 * allocate a replacement for that page.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                        struct sk_buff *skb, int count)
{
        struct sw_pg *cons_rx_pg, *prod_rx_pg;
        struct rx_bd *cons_bd, *prod_bd;
        int i;
        u16 hw_prod, prod;
        u16 cons = rxr->rx_pg_cons;

        cons_rx_pg = &rxr->rx_pg_ring[cons];

        /* The caller was unable to allocate a new page to replace the
         * last one in the frags array, so we need to recycle that page
         * and then free the skb.
         */
        if (skb) {
                struct page *page;
                struct skb_shared_info *shinfo;

                shinfo = skb_shinfo(skb);
                shinfo->nr_frags--;
                page = shinfo->frags[shinfo->nr_frags].page;
                shinfo->frags[shinfo->nr_frags].page = NULL;

                cons_rx_pg->page = page;
                dev_kfree_skb(skb);
        }

        hw_prod = rxr->rx_pg_prod;

        for (i = 0; i < count; i++) {
                prod = RX_PG_RING_IDX(hw_prod);

                prod_rx_pg = &rxr->rx_pg_ring[prod];
                cons_rx_pg = &rxr->rx_pg_ring[cons];
                cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
                prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

                /* Move page, mapping, and descriptor address from the
                 * consumer slot to the producer slot (no-op if equal).
                 */
                if (prod != cons) {
                        prod_rx_pg->page = cons_rx_pg->page;
                        cons_rx_pg->page = NULL;
                        pci_unmap_addr_set(prod_rx_pg, mapping,
                                pci_unmap_addr(cons_rx_pg, mapping));

                        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
                        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

                }
                cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
                hw_prod = NEXT_RX_BD(hw_prod);
        }
        rxr->rx_pg_prod = hw_prod;
        rxr->rx_pg_cons = cons;
}
2747
/* Recycle an RX skb from consumer slot @cons back to producer slot
 * @prod (used when the packet is copied out or dropped, so the buffer
 * can be handed straight back to the hardware).
 *
 * Syncs the header area back to the device, moves the skb, its DMA
 * mapping, and the descriptor address to the producer slot, and bumps
 * the producer byte-sequence counter.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
                  struct sk_buff *skb, u16 cons, u16 prod)
{
        struct sw_bd *cons_rx_buf, *prod_rx_buf;
        struct rx_bd *cons_bd, *prod_bd;

        cons_rx_buf = &rxr->rx_buf_ring[cons];
        prod_rx_buf = &rxr->rx_buf_ring[prod];

        /* Only the header area was synced for the CPU; give it back. */
        pci_dma_sync_single_for_device(bp->pdev,
                pci_unmap_addr(cons_rx_buf, mapping),
                BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

        rxr->rx_prod_bseq += bp->rx_buf_use_size;

        prod_rx_buf->skb = skb;

        /* Same slot: nothing further to move. */
        if (cons == prod)
                return;

        pci_unmap_addr_set(prod_rx_buf, mapping,
                        pci_unmap_addr(cons_rx_buf, mapping));

        cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
        prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
        prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
        prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2777
/* Finish receiving one packet into @skb, replacing the consumed ring
 * buffer and, for jumbo/split packets, attaching page-ring fragments.
 *
 * @len:      packet length (without the trailing 4-byte CRC).
 * @hdr_len:  0 for a linear packet; otherwise the number of header
 *            bytes in the linear part, with the rest in pages.
 * @dma_addr: mapping of the linear buffer (unmapped here).
 * @ring_idx: consumer index in the high 16 bits, producer in the low.
 *
 * On any allocation failure the buffers are recycled back to the rings
 * and an error is returned; on success the skb is fully assembled.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
            unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
            u32 ring_idx)
{
        int err;
        u16 prod = ring_idx & 0xffff;

        /* Post a replacement buffer before consuming this one. */
        err = bnx2_alloc_rx_skb(bp, rxr, prod);
        if (unlikely(err)) {
                bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
                if (hdr_len) {
                        /* raw_len includes the 4-byte CRC still counted
                         * in the page frags at this point.
                         */
                        unsigned int raw_len = len + 4;
                        int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

                        bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
                }
                return err;
        }

        skb_reserve(skb, BNX2_RX_OFFSET);
        pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
                PCI_DMA_FROMDEVICE);

        if (hdr_len == 0) {
                /* Entire packet is linear. */
                skb_put(skb, len);
                return 0;
        } else {
                unsigned int i, frag_len, frag_size, pages;
                struct sw_pg *rx_pg;
                u16 pg_cons = rxr->rx_pg_cons;
                u16 pg_prod = rxr->rx_pg_prod;

                /* Bytes beyond the header live in pages (CRC included
                 * until trimmed below).
                 */
                frag_size = len + 4 - hdr_len;
                pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
                skb_put(skb, hdr_len);

                for (i = 0; i < pages; i++) {
                        dma_addr_t mapping_old;

                        frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
                        /* If only CRC bytes (<= 4) remain, the packet data
                         * is complete: recycle the remaining pages and trim
                         * the already-counted CRC tail off the skb.
                         */
                        if (unlikely(frag_len <= 4)) {
                                unsigned int tail = 4 - frag_len;

                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
                                                        pages - i);
                                skb->len -= tail;
                                if (i == 0) {
                                        skb->tail -= tail;
                                } else {
                                        skb_frag_t *frag =
                                                &skb_shinfo(skb)->frags[i - 1];
                                        frag->size -= tail;
                                        skb->data_len -= tail;
                                        skb->truesize -= tail;
                                }
                                return 0;
                        }
                        rx_pg = &rxr->rx_pg_ring[pg_cons];

                        /* Don't unmap yet.  If we're unable to allocate a new
                         * page, we need to recycle the page and the DMA addr.
                         */
                        mapping_old = pci_unmap_addr(rx_pg, mapping);
                        if (i == pages - 1)
                                frag_len -= 4;          /* drop the CRC */

                        skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
                        rx_pg->page = NULL;

                        err = bnx2_alloc_rx_page(bp, rxr,
                                                 RX_PG_RING_IDX(pg_prod));
                        if (unlikely(err)) {
                                rxr->rx_pg_cons = pg_cons;
                                rxr->rx_pg_prod = pg_prod;
                                /* Reclaims the frag just attached and frees
                                 * the skb.
                                 */
                                bnx2_reuse_rx_skb_pages(bp, rxr, skb,
                                                        pages - i);
                                return err;
                        }

                        pci_unmap_page(bp->pdev, mapping_old,
                                       PAGE_SIZE, PCI_DMA_FROMDEVICE);

                        frag_size -= frag_len;
                        skb->data_len += frag_len;
                        skb->truesize += frag_len;
                        skb->len += frag_len;

                        pg_prod = NEXT_RX_BD(pg_prod);
                        pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
                }
                rxr->rx_pg_prod = pg_prod;
                rxr->rx_pg_cons = pg_cons;
        }
        return 0;
}
2876
/* Read the hardware RX consumer index from the status block.
 *
 * Mirrors bnx2_get_hw_tx_cons(): barriers keep the compiler from
 * caching the DMA-updated field, and the last descriptor of each ring
 * page is skipped (it holds the next-page pointer).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
        u16 cons;

        /* Tell compiler that status block fields can change. */
        barrier();
        cons = *bnapi->hw_rx_cons_ptr;
        barrier();
        if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
                cons++;
        return cons;
}
2890
b6016b76 2891static int
35efa7c1 2892bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 2893{
bb4f98ab 2894 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76
MC
2895 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2896 struct l2_fhdr *rx_hdr;
1db82f2a 2897 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 2898
35efa7c1 2899 hw_cons = bnx2_get_hw_rx_cons(bnapi);
bb4f98ab
MC
2900 sw_cons = rxr->rx_cons;
2901 sw_prod = rxr->rx_prod;
b6016b76
MC
2902
2903 /* Memory barrier necessary as speculative reads of the rx
2904 * buffer can be ahead of the index in the status block
2905 */
2906 rmb();
2907 while (sw_cons != hw_cons) {
1db82f2a 2908 unsigned int len, hdr_len;
ade2bfe7 2909 u32 status;
b6016b76
MC
2910 struct sw_bd *rx_buf;
2911 struct sk_buff *skb;
236b6394 2912 dma_addr_t dma_addr;
f22828e8
MC
2913 u16 vtag = 0;
2914 int hw_vlan __maybe_unused = 0;
b6016b76
MC
2915
2916 sw_ring_cons = RX_RING_IDX(sw_cons);
2917 sw_ring_prod = RX_RING_IDX(sw_prod);
2918
bb4f98ab 2919 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
b6016b76 2920 skb = rx_buf->skb;
236b6394
MC
2921
2922 rx_buf->skb = NULL;
2923
2924 dma_addr = pci_unmap_addr(rx_buf, mapping);
2925
2926 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
601d3d18
BL
2927 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2928 PCI_DMA_FROMDEVICE);
b6016b76
MC
2929
2930 rx_hdr = (struct l2_fhdr *) skb->data;
1db82f2a 2931 len = rx_hdr->l2_fhdr_pkt_len;
990ec380 2932 status = rx_hdr->l2_fhdr_status;
b6016b76 2933
1db82f2a
MC
2934 hdr_len = 0;
2935 if (status & L2_FHDR_STATUS_SPLIT) {
2936 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2937 pg_ring_used = 1;
2938 } else if (len > bp->rx_jumbo_thresh) {
2939 hdr_len = bp->rx_jumbo_thresh;
2940 pg_ring_used = 1;
2941 }
2942
990ec380
MC
2943 if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
2944 L2_FHDR_ERRORS_PHY_DECODE |
2945 L2_FHDR_ERRORS_ALIGNMENT |
2946 L2_FHDR_ERRORS_TOO_SHORT |
2947 L2_FHDR_ERRORS_GIANT_FRAME))) {
2948
2949 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
2950 sw_ring_prod);
2951 if (pg_ring_used) {
2952 int pages;
2953
2954 pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;
2955
2956 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
2957 }
2958 goto next_rx;
2959 }
2960
1db82f2a 2961 len -= 4;
b6016b76 2962
5d5d0015 2963 if (len <= bp->rx_copy_thresh) {
b6016b76
MC
2964 struct sk_buff *new_skb;
2965
f22828e8 2966 new_skb = netdev_alloc_skb(bp->dev, len + 6);
85833c62 2967 if (new_skb == NULL) {
bb4f98ab 2968 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
85833c62
MC
2969 sw_ring_prod);
2970 goto next_rx;
2971 }
b6016b76
MC
2972
2973 /* aligned copy */
d89cb6af 2974 skb_copy_from_linear_data_offset(skb,
f22828e8
MC
2975 BNX2_RX_OFFSET - 6,
2976 new_skb->data, len + 6);
2977 skb_reserve(new_skb, 6);
b6016b76 2978 skb_put(new_skb, len);
b6016b76 2979
bb4f98ab 2980 bnx2_reuse_rx_skb(bp, rxr, skb,
b6016b76
MC
2981 sw_ring_cons, sw_ring_prod);
2982
2983 skb = new_skb;
bb4f98ab 2984 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
a1f60190 2985 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 2986 goto next_rx;
b6016b76 2987
f22828e8
MC
2988 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
2989 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
2990 vtag = rx_hdr->l2_fhdr_vlan_tag;
2991#ifdef BCM_VLAN
2992 if (bp->vlgrp)
2993 hw_vlan = 1;
2994 else
2995#endif
2996 {
2997 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
2998 __skb_push(skb, 4);
2999
3000 memmove(ve, skb->data + 4, ETH_ALEN * 2);
3001 ve->h_vlan_proto = htons(ETH_P_8021Q);
3002 ve->h_vlan_TCI = htons(vtag);
3003 len += 4;
3004 }
3005 }
3006
b6016b76
MC
3007 skb->protocol = eth_type_trans(skb, bp->dev);
3008
3009 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 3010 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 3011
745720e5 3012 dev_kfree_skb(skb);
b6016b76
MC
3013 goto next_rx;
3014
3015 }
3016
b6016b76
MC
3017 skb->ip_summed = CHECKSUM_NONE;
3018 if (bp->rx_csum &&
3019 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3020 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3021
ade2bfe7
MC
3022 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3023 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
3024 skb->ip_summed = CHECKSUM_UNNECESSARY;
3025 }
3026
0c8dfc83
DM
3027 skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);
3028
b6016b76 3029#ifdef BCM_VLAN
f22828e8
MC
3030 if (hw_vlan)
3031 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
b6016b76
MC
3032 else
3033#endif
3034 netif_receive_skb(skb);
3035
b6016b76
MC
3036 rx_pkt++;
3037
3038next_rx:
b6016b76
MC
3039 sw_cons = NEXT_RX_BD(sw_cons);
3040 sw_prod = NEXT_RX_BD(sw_prod);
3041
3042 if ((rx_pkt == budget))
3043 break;
f4e418f7
MC
3044
3045 /* Refresh hw_cons to see if there is new work */
3046 if (sw_cons == hw_cons) {
35efa7c1 3047 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
3048 rmb();
3049 }
b6016b76 3050 }
bb4f98ab
MC
3051 rxr->rx_cons = sw_cons;
3052 rxr->rx_prod = sw_prod;
b6016b76 3053
1db82f2a 3054 if (pg_ring_used)
bb4f98ab 3055 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
1db82f2a 3056
bb4f98ab 3057 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
b6016b76 3058
bb4f98ab 3059 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
3060
3061 mmiowb();
3062
3063 return rx_pkt;
3064
3065}
3066
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);
        /* Mask further interrupts until NAPI polling completes. */
        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3089
/* One-shot MSI ISR: the hardware auto-masks the interrupt, so unlike
 * bnx2_msi() no explicit mask write is needed before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;

        prefetch(bnapi->status_blk.msi);

        /* Return here if interrupt is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        napi_schedule(&bnapi->napi);

        return IRQ_HANDLED;
}
3106
/* Legacy INTx ISR (also used for shared interrupt lines).
 *
 * Returns IRQ_NONE if the interrupt was not ours (status index
 * unchanged and no INTA asserted), otherwise masks the interrupt and
 * schedules NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
        struct bnx2_napi *bnapi = dev_instance;
        struct bnx2 *bp = bnapi->bp;
        struct status_block *sblk = bnapi->status_blk.msi;

        /* When using INTx, it is possible for the interrupt to arrive
         * at the CPU before the status block posted prior to the
         * interrupt. Reading a register will flush the status block.
         * When using MSI, the MSI message will always complete after
         * the status block write.
         */
        if ((sblk->status_idx == bnapi->last_status_idx) &&
            (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
             BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
                return IRQ_NONE;

        REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
                BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
                BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

        /* Read back to deassert IRQ immediately to avoid too many
         * spurious interrupts.
         */
        REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

        /* Return here if interrupt is shared and is disabled. */
        if (unlikely(atomic_read(&bp->intr_sem) != 0))
                return IRQ_HANDLED;

        /* Record the status index only if NAPI was not already
         * scheduled, so a missed update cannot be acked by accident.
         */
        if (napi_schedule_prep(&bnapi->napi)) {
                bnapi->last_status_idx = sblk->status_idx;
                __napi_schedule(&bnapi->napi);
        }

        return IRQ_HANDLED;
}
3145
f4e418f7 3146static inline int
43e80b89 3147bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3148{
35e9010b 3149 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3150 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3151
bb4f98ab 3152 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3153 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3154 return 1;
43e80b89
MC
3155 return 0;
3156}
3157
3158#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3159 STATUS_ATTN_BITS_TIMER_ABORT)
3160
3161static inline int
3162bnx2_has_work(struct bnx2_napi *bnapi)
3163{
3164 struct status_block *sblk = bnapi->status_blk.msi;
3165
3166 if (bnx2_has_fast_work(bnapi))
3167 return 1;
f4e418f7 3168
da3e4fbe
MC
3169 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3170 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
3171 return 1;
3172
3173 return 0;
3174}
3175
/* Work around lost MSI interrupts.
 *
 * Called periodically (presumably from the driver timer — caller not
 * visible here).  If work has been pending across two consecutive
 * checks with no status-index progress, assume the MSI was lost:
 * toggle MSI enable off/on and invoke the MSI handler by hand.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* No progress since the last idle check: kick the chip. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3197
/* Service pending link/attention events from the NAPI poll loop. */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Raised-vs-acked mismatch means an unhandled attention event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3217
3218static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3219 int work_done, int budget)
3220{
3221 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3222 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3223
35e9010b 3224 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3225 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3226
bb4f98ab 3227 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3228 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3229
6f535763
DM
3230 return work_done;
3231}
3232
/* NAPI poll handler for MSI-X vectors (fast-path work only; link
 * attention is handled by vector 0's bnx2_poll()).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack the status index and re-enable this vector's
			 * interrupt.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3259
/* Main NAPI poll handler (vector 0): services link attention events and
 * fast-path RX/TX work, then re-enables interrupts when idle.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: one ack write re-enables. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the line still masked first, then
			 * unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3304
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the EMAC RX mode (promiscuous / all-multi / multicast hash /
 * VLAN tag keeping) and the RPM sort-user0 match filters from the net
 * device's flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct dev_addr_list *uc_ptr;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promisc/VLAN-keep cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep the VLAN tag only when no VLAN group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept everything: fill every multicast hash register. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each multicast address: low 8 CRC bits select a
		 * bit in one of the 8 x 32-bit hash registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Secondary unicast addresses: use perfect-match slots if they
	 * fit, otherwise fall back to promiscuous mode.
	 */
	uc_ptr = NULL;
	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		uc_ptr = dev->uc_list;

		/* Add all entries into to the match filter list */
		for (i = 0; i < dev->uc_count; i++) {
			bnx2_set_mac_addr(bp, uc_ptr->da_addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			uc_ptr = uc_ptr->next;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Disable, program, then re-enable the sort-user0 filter. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3402
57579f76
MC
3403static int __devinit
3404check_fw_section(const struct firmware *fw,
3405 const struct bnx2_fw_file_section *section,
3406 u32 alignment, bool non_empty)
3407{
3408 u32 offset = be32_to_cpu(section->offset);
3409 u32 len = be32_to_cpu(section->len);
3410
3411 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3412 return -EINVAL;
3413 if ((non_empty && len == 0) || len > fw->size - offset ||
3414 len & (alignment - 1))
3415 return -EINVAL;
3416 return 0;
3417}
3418
3419static int __devinit
3420check_mips_fw_entry(const struct firmware *fw,
3421 const struct bnx2_mips_fw_file_entry *entry)
3422{
3423 if (check_fw_section(fw, &entry->text, 4, true) ||
3424 check_fw_section(fw, &entry->data, 4, false) ||
3425 check_fw_section(fw, &entry->rodata, 4, false))
3426 return -EINVAL;
3427 return 0;
3428}
3429
3430static int __devinit
3431bnx2_request_firmware(struct bnx2 *bp)
b6016b76 3432{
57579f76 3433 const char *mips_fw_file, *rv2p_fw_file;
5ee1c326
BB
3434 const struct bnx2_mips_fw_file *mips_fw;
3435 const struct bnx2_rv2p_fw_file *rv2p_fw;
57579f76
MC
3436 int rc;
3437
3438 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3439 mips_fw_file = FW_MIPS_FILE_09;
3440 rv2p_fw_file = FW_RV2P_FILE_09;
3441 } else {
3442 mips_fw_file = FW_MIPS_FILE_06;
3443 rv2p_fw_file = FW_RV2P_FILE_06;
3444 }
3445
3446 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3447 if (rc) {
3448 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3449 mips_fw_file);
3450 return rc;
3451 }
3452
3453 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3454 if (rc) {
3455 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3456 rv2p_fw_file);
3457 return rc;
3458 }
5ee1c326
BB
3459 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3460 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3461 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3462 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3463 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3464 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3465 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3466 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
57579f76
MC
3467 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3468 mips_fw_file);
3469 return -EINVAL;
3470 }
5ee1c326
BB
3471 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3472 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3473 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
57579f76
MC
3474 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3475 rv2p_fw_file);
3476 return -EINVAL;
3477 }
3478
3479 return 0;
3480}
3481
3482static u32
3483rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3484{
3485 switch (idx) {
3486 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3487 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3488 rv2p_code |= RV2P_BD_PAGE_SIZE;
3489 break;
3490 }
3491 return rv2p_code;
3492}
3493
/* Load one RV2P processor's firmware from the request_firmware image.
 *
 * Instructions are 64-bit (written as HIGH/LOW register pairs), then up
 * to 8 fixup locations are re-written with patched instruction words.
 * The processor is left in reset; un-stall happens later.  Always
 * returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for this processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Write each 8-byte instruction, then commit it at index i/8. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the fixup locations with patched instruction words. */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			/* loc counts dwords; instruction index is loc/2. */
			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3553
af3ee519 3554static int
57579f76
MC
3555load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3556 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3557{
57579f76
MC
3558 u32 addr, len, file_offset;
3559 __be32 *data;
b6016b76
MC
3560 u32 offset;
3561 u32 val;
3562
3563 /* Halt the CPU. */
2726d6e1 3564 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3565 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3566 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3567 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3568
3569 /* Load the Text area. */
57579f76
MC
3570 addr = be32_to_cpu(fw_entry->text.addr);
3571 len = be32_to_cpu(fw_entry->text.len);
3572 file_offset = be32_to_cpu(fw_entry->text.offset);
3573 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3574
57579f76
MC
3575 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3576 if (len) {
b6016b76
MC
3577 int j;
3578
57579f76
MC
3579 for (j = 0; j < (len / 4); j++, offset += 4)
3580 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3581 }
3582
57579f76
MC
3583 /* Load the Data area. */
3584 addr = be32_to_cpu(fw_entry->data.addr);
3585 len = be32_to_cpu(fw_entry->data.len);
3586 file_offset = be32_to_cpu(fw_entry->data.offset);
3587 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3588
57579f76
MC
3589 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3590 if (len) {
b6016b76
MC
3591 int j;
3592
57579f76
MC
3593 for (j = 0; j < (len / 4); j++, offset += 4)
3594 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3595 }
3596
3597 /* Load the Read-Only area. */
57579f76
MC
3598 addr = be32_to_cpu(fw_entry->rodata.addr);
3599 len = be32_to_cpu(fw_entry->rodata.len);
3600 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3601 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3602
3603 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3604 if (len) {
b6016b76
MC
3605 int j;
3606
57579f76
MC
3607 for (j = 0; j < (len / 4); j++, offset += 4)
3608 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3609 }
3610
3611 /* Clear the pre-fetch instruction. */
2726d6e1 3612 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3613
3614 val = be32_to_cpu(fw_entry->start_addr);
3615 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3616
3617 /* Start the CPU. */
2726d6e1 3618 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3619 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3620 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3621 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3622
3623 return 0;
b6016b76
MC
3624}
3625
/* Load firmware into all on-chip processors: both RV2P engines and the
 * five MIPS CPUs (RX, TX, TX patch-up, completion, command).
 *
 * Returns 0 on success or the first load_cpu_fw() error.  (With the
 * current load_cpu_fw() this is always 0; the error plumbing is kept
 * for robustness.)
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3665
/* Transition the device between PCI power states.
 *
 * D0: clear the PM state bits (with the mandated delay when leaving
 * D3hot) and disable magic/ACPI packet modes.
 *
 * D3hot: if Wake-on-LAN is enabled, force a slow link speed on copper,
 * program the MAC for magic/ACPI packet reception with all-multicast
 * plus broadcast sorting, enable the EMAC/RPM blocks needed for WOL,
 * notify the firmware, and finally write the PM state — after which
 * the chip's memory space must not be touched.
 *
 * Returns 0 on success, -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power-state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Ack any received magic/ACPI packets and turn off
		 * magic-packet mode for normal operation.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			autoneg = bp->autoneg;
			advertising = bp->advertising;

			/* Advertise only 10/100 on copper to save power
			 * while suspended; restored below.
			 */
			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware about the suspend/WOL choice. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WOL is on; all other
		 * chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3803
3804static int
3805bnx2_acquire_nvram_lock(struct bnx2 *bp)
3806{
3807 u32 val;
3808 int j;
3809
3810 /* Request access to the flash interface. */
3811 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3812 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3813 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3814 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3815 break;
3816
3817 udelay(5);
3818 }
3819
3820 if (j >= NVRAM_TIMEOUT_COUNT)
3821 return -EBUSY;
3822
3823 return 0;
3824}
3825
3826static int
3827bnx2_release_nvram_lock(struct bnx2 *bp)
3828{
3829 int j;
3830 u32 val;
3831
3832 /* Relinquish nvram interface. */
3833 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3834
3835 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3836 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3837 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3838 break;
3839
3840 udelay(5);
3841 }
3842
3843 if (j >= NVRAM_TIMEOUT_COUNT)
3844 return -EBUSY;
3845
3846 return 0;
3847}
3848
3849
/* Enable NVRAM writes.
 *
 * Sets the PCI-side write-enable, and for flash parts that need an
 * explicit WREN command (BNX2_NV_WREN flag) issues it and waits for
 * completion.  Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Need to clear DONE before issuing the WREN command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
3878
3879static void
3880bnx2_disable_nvram_write(struct bnx2 *bp)
3881{
3882 u32 val;
3883
3884 val = REG_RD(bp, BNX2_MISC_CFG);
3885 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3886}
3887
3888
3889static void
3890bnx2_enable_nvram_access(struct bnx2 *bp)
3891{
3892 u32 val;
3893
3894 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3895 /* Enable both bits, even on read. */
6aa20a22 3896 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3897 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3898}
3899
3900static void
3901bnx2_disable_nvram_access(struct bnx2 *bp)
3902{
3903 u32 val;
3904
3905 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3906 /* Disable both bits, even after read. */
6aa20a22 3907 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3908 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3909 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3910}
3911
/* Erase one flash page at @offset.
 *
 * Buffered flash parts need no erase and return immediately.  For
 * others, issues the ERASE command and polls for completion.  Returns
 * 0 on success, -EBUSY on timeout.  Caller must hold the NVRAM lock
 * with write access enabled.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3951
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, i.e. as the bytes appear in flash).
 *
 * @cmd_flags carries FIRST/LAST framing bits for multi-word bursts.
 * Returns 0 on success, -EBUSY on timeout.  Caller must hold the
 * NVRAM lock with access enabled.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3995
3996
/* Write one 32-bit word (@val, 4 big-endian bytes) to NVRAM at @offset.
 *
 * @cmd_flags carries FIRST/LAST framing bits for multi-word bursts.
 * Returns 0 on success, -EBUSY on timeout.  Caller must hold the
 * NVRAM lock with write access enabled.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4040
/* Detect and configure the attached flash/EEPROM part.
 *
 * 5709 has a fixed flash spec; other chips identify the part from the
 * NVM_CFG1 strapping (backup straps when bit 23 is set, or when the
 * interface was already reconfigured per bit 30) and, if it has not
 * been reconfigured yet, program the flash interface registers.
 * Finally the flash size is read from shared firmware config, falling
 * back to the table's total_size.  Returns 0 on success, -ENODEV for
 * an unknown part, or a lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4123
4124static int
4125bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4126 int buf_size)
4127{
4128 int rc = 0;
4129 u32 cmd_flags, offset32, len32, extra;
4130
4131 if (buf_size == 0)
4132 return 0;
4133
4134 /* Request access to the flash interface. */
4135 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4136 return rc;
4137
4138 /* Enable access to flash interface */
4139 bnx2_enable_nvram_access(bp);
4140
4141 len32 = buf_size;
4142 offset32 = offset;
4143 extra = 0;
4144
4145 cmd_flags = 0;
4146
4147 if (offset32 & 3) {
4148 u8 buf[4];
4149 u32 pre_len;
4150
4151 offset32 &= ~3;
4152 pre_len = 4 - (offset & 3);
4153
4154 if (pre_len >= len32) {
4155 pre_len = len32;
4156 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4157 BNX2_NVM_COMMAND_LAST;
4158 }
4159 else {
4160 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4161 }
4162
4163 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4164
4165 if (rc)
4166 return rc;
4167
4168 memcpy(ret_buf, buf + (offset & 3), pre_len);
4169
4170 offset32 += 4;
4171 ret_buf += pre_len;
4172 len32 -= pre_len;
4173 }
4174 if (len32 & 3) {
4175 extra = 4 - (len32 & 3);
4176 len32 = (len32 + 4) & ~3;
4177 }
4178
4179 if (len32 == 4) {
4180 u8 buf[4];
4181
4182 if (cmd_flags)
4183 cmd_flags = BNX2_NVM_COMMAND_LAST;
4184 else
4185 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4186 BNX2_NVM_COMMAND_LAST;
4187
4188 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4189
4190 memcpy(ret_buf, buf, 4 - extra);
4191 }
4192 else if (len32 > 0) {
4193 u8 buf[4];
4194
4195 /* Read the first word. */
4196 if (cmd_flags)
4197 cmd_flags = 0;
4198 else
4199 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4200
4201 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4202
4203 /* Advance to the next dword. */
4204 offset32 += 4;
4205 ret_buf += 4;
4206 len32 -= 4;
4207
4208 while (len32 > 4 && rc == 0) {
4209 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4210
4211 /* Advance to the next dword. */
4212 offset32 += 4;
4213 ret_buf += 4;
4214 len32 -= 4;
4215 }
4216
4217 if (rc)
4218 return rc;
4219
4220 cmd_flags = BNX2_NVM_COMMAND_LAST;
4221 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4222
4223 memcpy(ret_buf, buf, 4 - extra);
4224 }
4225
4226 /* Disable access to flash interface */
4227 bnx2_disable_nvram_access(bp);
4228
4229 bnx2_release_nvram_lock(bp);
4230
4231 return rc;
4232}
4233
4234static int
4235bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4236 int buf_size)
4237{
4238 u32 written, offset32, len32;
e6be763f 4239 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
b6016b76
MC
4240 int rc = 0;
4241 int align_start, align_end;
4242
4243 buf = data_buf;
4244 offset32 = offset;
4245 len32 = buf_size;
4246 align_start = align_end = 0;
4247
4248 if ((align_start = (offset32 & 3))) {
4249 offset32 &= ~3;
c873879c
MC
4250 len32 += align_start;
4251 if (len32 < 4)
4252 len32 = 4;
b6016b76
MC
4253 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4254 return rc;
4255 }
4256
4257 if (len32 & 3) {
c873879c
MC
4258 align_end = 4 - (len32 & 3);
4259 len32 += align_end;
4260 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4261 return rc;
b6016b76
MC
4262 }
4263
4264 if (align_start || align_end) {
e6be763f
MC
4265 align_buf = kmalloc(len32, GFP_KERNEL);
4266 if (align_buf == NULL)
b6016b76
MC
4267 return -ENOMEM;
4268 if (align_start) {
e6be763f 4269 memcpy(align_buf, start, 4);
b6016b76
MC
4270 }
4271 if (align_end) {
e6be763f 4272 memcpy(align_buf + len32 - 4, end, 4);
b6016b76 4273 }
e6be763f
MC
4274 memcpy(align_buf + align_start, data_buf, buf_size);
4275 buf = align_buf;
b6016b76
MC
4276 }
4277
e30372c9 4278 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
ae181bc4
MC
4279 flash_buffer = kmalloc(264, GFP_KERNEL);
4280 if (flash_buffer == NULL) {
4281 rc = -ENOMEM;
4282 goto nvram_write_end;
4283 }
4284 }
4285
b6016b76
MC
4286 written = 0;
4287 while ((written < len32) && (rc == 0)) {
4288 u32 page_start, page_end, data_start, data_end;
4289 u32 addr, cmd_flags;
4290 int i;
b6016b76
MC
4291
4292 /* Find the page_start addr */
4293 page_start = offset32 + written;
4294 page_start -= (page_start % bp->flash_info->page_size);
4295 /* Find the page_end addr */
4296 page_end = page_start + bp->flash_info->page_size;
4297 /* Find the data_start addr */
4298 data_start = (written == 0) ? offset32 : page_start;
4299 /* Find the data_end addr */
6aa20a22 4300 data_end = (page_end > offset32 + len32) ?
b6016b76
MC
4301 (offset32 + len32) : page_end;
4302
4303 /* Request access to the flash interface. */
4304 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4305 goto nvram_write_end;
4306
4307 /* Enable access to flash interface */
4308 bnx2_enable_nvram_access(bp);
4309
4310 cmd_flags = BNX2_NVM_COMMAND_FIRST;
e30372c9 4311 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4312 int j;
4313
4314 /* Read the whole page into the buffer
4315 * (non-buffer flash only) */
4316 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4317 if (j == (bp->flash_info->page_size - 4)) {
4318 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4319 }
4320 rc = bnx2_nvram_read_dword(bp,
6aa20a22
JG
4321 page_start + j,
4322 &flash_buffer[j],
b6016b76
MC
4323 cmd_flags);
4324
4325 if (rc)
4326 goto nvram_write_end;
4327
4328 cmd_flags = 0;
4329 }
4330 }
4331
4332 /* Enable writes to flash interface (unlock write-protect) */
4333 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4334 goto nvram_write_end;
4335
b6016b76
MC
4336 /* Loop to write back the buffer data from page_start to
4337 * data_start */
4338 i = 0;
e30372c9 4339 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
c873879c
MC
4340 /* Erase the page */
4341 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4342 goto nvram_write_end;
4343
4344 /* Re-enable the write again for the actual write */
4345 bnx2_enable_nvram_write(bp);
4346
b6016b76
MC
4347 for (addr = page_start; addr < data_start;
4348 addr += 4, i += 4) {
6aa20a22 4349
b6016b76
MC
4350 rc = bnx2_nvram_write_dword(bp, addr,
4351 &flash_buffer[i], cmd_flags);
4352
4353 if (rc != 0)
4354 goto nvram_write_end;
4355
4356 cmd_flags = 0;
4357 }
4358 }
4359
4360 /* Loop to write the new data from data_start to data_end */
bae25761 4361 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
b6016b76 4362 if ((addr == page_end - 4) ||
e30372c9 4363 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
b6016b76
MC
4364 (addr == data_end - 4))) {
4365
4366 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4367 }
4368 rc = bnx2_nvram_write_dword(bp, addr, buf,
4369 cmd_flags);
4370
4371 if (rc != 0)
4372 goto nvram_write_end;
4373
4374 cmd_flags = 0;
4375 buf += 4;
4376 }
4377
4378 /* Loop to write back the buffer data from data_end
4379 * to page_end */
e30372c9 4380 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4381 for (addr = data_end; addr < page_end;
4382 addr += 4, i += 4) {
6aa20a22 4383
b6016b76
MC
4384 if (addr == page_end-4) {
4385 cmd_flags = BNX2_NVM_COMMAND_LAST;
4386 }
4387 rc = bnx2_nvram_write_dword(bp, addr,
4388 &flash_buffer[i], cmd_flags);
4389
4390 if (rc != 0)
4391 goto nvram_write_end;
4392
4393 cmd_flags = 0;
4394 }
4395 }
4396
4397 /* Disable writes to flash interface (lock write-protect) */
4398 bnx2_disable_nvram_write(bp);
4399
4400 /* Disable access to flash interface */
4401 bnx2_disable_nvram_access(bp);
4402 bnx2_release_nvram_lock(bp);
4403
4404 /* Increment written */
4405 written += data_end - data_start;
4406 }
4407
4408nvram_write_end:
e6be763f
MC
4409 kfree(flash_buffer);
4410 kfree(align_buf);
b6016b76
MC
4411 return rc;
4412}
4413
0d8a6571 4414static void
7c62e83b 4415bnx2_init_fw_cap(struct bnx2 *bp)
0d8a6571 4416{
7c62e83b 4417 u32 val, sig = 0;
0d8a6571 4418
583c28e5 4419 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
7c62e83b
MC
4420 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4421
4422 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4423 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
0d8a6571 4424
2726d6e1 4425 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
0d8a6571
MC
4426 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4427 return;
4428
7c62e83b
MC
4429 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4430 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4431 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4432 }
4433
4434 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4435 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4436 u32 link;
4437
583c28e5 4438 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
0d8a6571 4439
7c62e83b
MC
4440 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4441 if (link & BNX2_LINK_STATUS_SERDES_LINK)
0d8a6571
MC
4442 bp->phy_port = PORT_FIBRE;
4443 else
4444 bp->phy_port = PORT_TP;
489310a4 4445
7c62e83b
MC
4446 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4447 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
0d8a6571 4448 }
7c62e83b
MC
4449
4450 if (netif_running(bp->dev) && sig)
4451 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
0d8a6571
MC
4452}
4453
b4b36042
MC
4454static void
4455bnx2_setup_msix_tbl(struct bnx2 *bp)
4456{
4457 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4458
4459 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4460 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4461}
4462
b6016b76
MC
4463static int
4464bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4465{
4466 u32 val;
4467 int i, rc = 0;
489310a4 4468 u8 old_port;
b6016b76
MC
4469
4470 /* Wait for the current PCI transaction to complete before
4471 * issuing a reset. */
4472 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4473 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4474 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4475 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4476 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4477 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4478 udelay(5);
4479
b090ae2b 4480 /* Wait for the firmware to tell us it is ok to issue a reset. */
a2f13890 4481 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
b090ae2b 4482
b6016b76
MC
4483 /* Deposit a driver reset signature so the firmware knows that
4484 * this is a soft reset. */
2726d6e1
MC
4485 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4486 BNX2_DRV_RESET_SIGNATURE_MAGIC);
b6016b76 4487
b6016b76
MC
4488 /* Do a dummy read to force the chip to complete all current transaction
4489 * before we issue a reset. */
4490 val = REG_RD(bp, BNX2_MISC_ID);
4491
234754d5
MC
4492 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4493 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4494 REG_RD(bp, BNX2_MISC_COMMAND);
4495 udelay(5);
b6016b76 4496
234754d5
MC
4497 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4498 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 4499
234754d5 4500 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 4501
234754d5
MC
4502 } else {
4503 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4504 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4505 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4506
4507 /* Chip reset. */
4508 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4509
594a9dfa
MC
4510 /* Reading back any register after chip reset will hang the
4511 * bus on 5706 A0 and A1. The msleep below provides plenty
4512 * of margin for write posting.
4513 */
234754d5 4514 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
8e545881
AV
4515 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4516 msleep(20);
b6016b76 4517
234754d5
MC
4518 /* Reset takes approximate 30 usec */
4519 for (i = 0; i < 10; i++) {
4520 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4521 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4522 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4523 break;
4524 udelay(10);
4525 }
4526
4527 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4528 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4529 printk(KERN_ERR PFX "Chip reset did not complete\n");
4530 return -EBUSY;
4531 }
b6016b76
MC
4532 }
4533
4534 /* Make sure byte swapping is properly configured. */
4535 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4536 if (val != 0x01020304) {
4537 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4538 return -ENODEV;
4539 }
4540
b6016b76 4541 /* Wait for the firmware to finish its initialization. */
a2f13890 4542 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
b090ae2b
MC
4543 if (rc)
4544 return rc;
b6016b76 4545
0d8a6571 4546 spin_lock_bh(&bp->phy_lock);
489310a4 4547 old_port = bp->phy_port;
7c62e83b 4548 bnx2_init_fw_cap(bp);
583c28e5
MC
4549 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4550 old_port != bp->phy_port)
0d8a6571
MC
4551 bnx2_set_default_remote_link(bp);
4552 spin_unlock_bh(&bp->phy_lock);
4553
b6016b76
MC
4554 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4555 /* Adjust the voltage regular to two steps lower. The default
4556 * of this register is 0x0000000e. */
4557 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4558
4559 /* Remove bad rbuf memory from the free pool. */
4560 rc = bnx2_alloc_bad_rbuf(bp);
4561 }
4562
f86e82fb 4563 if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
4564 bnx2_setup_msix_tbl(bp);
4565
b6016b76
MC
4566 return rc;
4567}
4568
4569static int
4570bnx2_init_chip(struct bnx2 *bp)
4571{
d8026d93 4572 u32 val, mtu;
b4b36042 4573 int rc, i;
b6016b76
MC
4574
4575 /* Make sure the interrupt is not active. */
4576 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4577
4578 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4579 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4580#ifdef __BIG_ENDIAN
6aa20a22 4581 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
b6016b76 4582#endif
6aa20a22 4583 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
b6016b76
MC
4584 DMA_READ_CHANS << 12 |
4585 DMA_WRITE_CHANS << 16;
4586
4587 val |= (0x2 << 20) | (1 << 11);
4588
f86e82fb 4589 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
b6016b76
MC
4590 val |= (1 << 23);
4591
4592 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
f86e82fb 4593 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
b6016b76
MC
4594 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4595
4596 REG_WR(bp, BNX2_DMA_CONFIG, val);
4597
4598 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4599 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4600 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4601 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4602 }
4603
f86e82fb 4604 if (bp->flags & BNX2_FLAG_PCIX) {
b6016b76
MC
4605 u16 val16;
4606
4607 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4608 &val16);
4609 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4610 val16 & ~PCI_X_CMD_ERO);
4611 }
4612
4613 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4614 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4615 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4616 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4617
4618 /* Initialize context mapping and zero out the quick contexts. The
4619 * context block must have already been enabled. */
641bdcd5
MC
4620 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4621 rc = bnx2_init_5709_context(bp);
4622 if (rc)
4623 return rc;
4624 } else
59b47d8a 4625 bnx2_init_context(bp);
b6016b76 4626
fba9fe91
MC
4627 if ((rc = bnx2_init_cpus(bp)) != 0)
4628 return rc;
4629
b6016b76
MC
4630 bnx2_init_nvram(bp);
4631
5fcaed01 4632 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
4633
4634 val = REG_RD(bp, BNX2_MQ_CONFIG);
4635 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4636 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
68c9f75a
MC
4637 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4638 val |= BNX2_MQ_CONFIG_HALT_DIS;
4639
b6016b76
MC
4640 REG_WR(bp, BNX2_MQ_CONFIG, val);
4641
4642 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4643 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4644 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4645
4646 val = (BCM_PAGE_BITS - 8) << 24;
4647 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4648
4649 /* Configure page size. */
4650 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4651 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4652 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4653 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4654
4655 val = bp->mac_addr[0] +
4656 (bp->mac_addr[1] << 8) +
4657 (bp->mac_addr[2] << 16) +
4658 bp->mac_addr[3] +
4659 (bp->mac_addr[4] << 8) +
4660 (bp->mac_addr[5] << 16);
4661 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4662
4663 /* Program the MTU. Also include 4 bytes for CRC32. */
d8026d93
MC
4664 mtu = bp->dev->mtu;
4665 val = mtu + ETH_HLEN + ETH_FCS_LEN;
b6016b76
MC
4666 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4667 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4668 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4669
d8026d93
MC
4670 if (mtu < 1500)
4671 mtu = 1500;
4672
4673 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4674 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4675 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4676
b4b36042
MC
4677 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4678 bp->bnx2_napi[i].last_status_idx = 0;
4679
efba0180
MC
4680 bp->idle_chk_status_idx = 0xffff;
4681
b6016b76
MC
4682 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4683
4684 /* Set up how to generate a link change interrupt. */
4685 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4686
4687 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4688 (u64) bp->status_blk_mapping & 0xffffffff);
4689 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4690
4691 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4692 (u64) bp->stats_blk_mapping & 0xffffffff);
4693 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4694 (u64) bp->stats_blk_mapping >> 32);
4695
6aa20a22 4696 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
b6016b76
MC
4697 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4698
4699 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4700 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4701
4702 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4703 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4704
4705 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4706
4707 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4708
4709 REG_WR(bp, BNX2_HC_COM_TICKS,
4710 (bp->com_ticks_int << 16) | bp->com_ticks);
4711
4712 REG_WR(bp, BNX2_HC_CMD_TICKS,
4713 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4714
02537b06
MC
4715 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4716 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4717 else
7ea6920e 4718 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
b6016b76
MC
4719 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4720
4721 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
8e6a72c4 4722 val = BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76 4723 else {
8e6a72c4
MC
4724 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4725 BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76
MC
4726 }
4727
5e9ad9e1 4728 if (bp->irq_nvecs > 1) {
c76c0475
MC
4729 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4730 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4731
5e9ad9e1
MC
4732 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4733 }
4734
4735 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4736 val |= BNX2_HC_CONFIG_ONE_SHOT;
4737
4738 REG_WR(bp, BNX2_HC_CONFIG, val);
4739
4740 for (i = 1; i < bp->irq_nvecs; i++) {
4741 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4742 BNX2_HC_SB_CONFIG_1;
4743
6f743ca0 4744 REG_WR(bp, base,
c76c0475 4745 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5e9ad9e1 4746 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
c76c0475
MC
4747 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4748
6f743ca0 4749 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
c76c0475
MC
4750 (bp->tx_quick_cons_trip_int << 16) |
4751 bp->tx_quick_cons_trip);
4752
6f743ca0 4753 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
c76c0475
MC
4754 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4755
5e9ad9e1
MC
4756 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4757 (bp->rx_quick_cons_trip_int << 16) |
4758 bp->rx_quick_cons_trip);
8e6a72c4 4759
5e9ad9e1
MC
4760 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4761 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4762 }
8e6a72c4 4763
b6016b76
MC
4764 /* Clear internal stats counters. */
4765 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4766
da3e4fbe 4767 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
b6016b76
MC
4768
4769 /* Initialize the receive filter. */
4770 bnx2_set_rx_mode(bp->dev);
4771
0aa38df7
MC
4772 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4773 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4774 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4775 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4776 }
b090ae2b 4777 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
a2f13890 4778 1, 0);
b6016b76 4779
df149d70 4780 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
b6016b76
MC
4781 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4782
4783 udelay(20);
4784
bf5295bb
MC
4785 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4786
b090ae2b 4787 return rc;
b6016b76
MC
4788}
4789
c76c0475
MC
4790static void
4791bnx2_clear_ring_states(struct bnx2 *bp)
4792{
4793 struct bnx2_napi *bnapi;
35e9010b 4794 struct bnx2_tx_ring_info *txr;
bb4f98ab 4795 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
4796 int i;
4797
4798 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4799 bnapi = &bp->bnx2_napi[i];
35e9010b 4800 txr = &bnapi->tx_ring;
bb4f98ab 4801 rxr = &bnapi->rx_ring;
c76c0475 4802
35e9010b
MC
4803 txr->tx_cons = 0;
4804 txr->hw_tx_cons = 0;
bb4f98ab
MC
4805 rxr->rx_prod_bseq = 0;
4806 rxr->rx_prod = 0;
4807 rxr->rx_cons = 0;
4808 rxr->rx_pg_prod = 0;
4809 rxr->rx_pg_cons = 0;
c76c0475
MC
4810 }
4811}
4812
59b47d8a 4813static void
35e9010b 4814bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
59b47d8a
MC
4815{
4816 u32 val, offset0, offset1, offset2, offset3;
62a8313c 4817 u32 cid_addr = GET_CID_ADDR(cid);
59b47d8a
MC
4818
4819 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4820 offset0 = BNX2_L2CTX_TYPE_XI;
4821 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4822 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4823 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4824 } else {
4825 offset0 = BNX2_L2CTX_TYPE;
4826 offset1 = BNX2_L2CTX_CMD_TYPE;
4827 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4828 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4829 }
4830 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
62a8313c 4831 bnx2_ctx_wr(bp, cid_addr, offset0, val);
59b47d8a
MC
4832
4833 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
62a8313c 4834 bnx2_ctx_wr(bp, cid_addr, offset1, val);
59b47d8a 4835
35e9010b 4836 val = (u64) txr->tx_desc_mapping >> 32;
62a8313c 4837 bnx2_ctx_wr(bp, cid_addr, offset2, val);
59b47d8a 4838
35e9010b 4839 val = (u64) txr->tx_desc_mapping & 0xffffffff;
62a8313c 4840 bnx2_ctx_wr(bp, cid_addr, offset3, val);
59b47d8a 4841}
b6016b76
MC
4842
4843static void
35e9010b 4844bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
b6016b76
MC
4845{
4846 struct tx_bd *txbd;
c76c0475
MC
4847 u32 cid = TX_CID;
4848 struct bnx2_napi *bnapi;
35e9010b 4849 struct bnx2_tx_ring_info *txr;
c76c0475 4850
35e9010b
MC
4851 bnapi = &bp->bnx2_napi[ring_num];
4852 txr = &bnapi->tx_ring;
4853
4854 if (ring_num == 0)
4855 cid = TX_CID;
4856 else
4857 cid = TX_TSS_CID + ring_num - 1;
b6016b76 4858
2f8af120
MC
4859 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4860
35e9010b 4861 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4862
35e9010b
MC
4863 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4864 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
b6016b76 4865
35e9010b
MC
4866 txr->tx_prod = 0;
4867 txr->tx_prod_bseq = 0;
6aa20a22 4868
35e9010b
MC
4869 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4870 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4871
35e9010b 4872 bnx2_init_tx_context(bp, cid, txr);
b6016b76
MC
4873}
4874
4875static void
5d5d0015
MC
4876bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4877 int num_rings)
b6016b76 4878{
b6016b76 4879 int i;
5d5d0015 4880 struct rx_bd *rxbd;
6aa20a22 4881
5d5d0015 4882 for (i = 0; i < num_rings; i++) {
13daffa2 4883 int j;
b6016b76 4884
5d5d0015 4885 rxbd = &rx_ring[i][0];
13daffa2 4886 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4887 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4888 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4889 }
5d5d0015 4890 if (i == (num_rings - 1))
13daffa2
MC
4891 j = 0;
4892 else
4893 j = i + 1;
5d5d0015
MC
4894 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4895 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4896 }
5d5d0015
MC
4897}
4898
4899static void
bb4f98ab 4900bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5d5d0015
MC
4901{
4902 int i;
4903 u16 prod, ring_prod;
bb4f98ab
MC
4904 u32 cid, rx_cid_addr, val;
4905 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4906 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4907
4908 if (ring_num == 0)
4909 cid = RX_CID;
4910 else
4911 cid = RX_RSS_CID + ring_num - 1;
4912
4913 rx_cid_addr = GET_CID_ADDR(cid);
5d5d0015 4914
bb4f98ab 4915 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5d5d0015
MC
4916 bp->rx_buf_use_size, bp->rx_max_ring);
4917
bb4f98ab 4918 bnx2_init_rx_context(bp, cid);
83e3fc89
MC
4919
4920 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4921 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4922 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4923 }
4924
62a8313c 4925 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246 4926 if (bp->rx_pg_ring_size) {
bb4f98ab
MC
4927 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4928 rxr->rx_pg_desc_mapping,
47bf4246
MC
4929 PAGE_SIZE, bp->rx_max_pg_ring);
4930 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
62a8313c
MC
4931 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4932 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5e9ad9e1 4933 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
47bf4246 4934
bb4f98ab 4935 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
62a8313c 4936 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
47bf4246 4937
bb4f98ab 4938 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
62a8313c 4939 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
47bf4246
MC
4940
4941 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4942 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4943 }
b6016b76 4944
bb4f98ab 4945 val = (u64) rxr->rx_desc_mapping[0] >> 32;
62a8313c 4946 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 4947
bb4f98ab 4948 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
62a8313c 4949 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 4950
bb4f98ab 4951 ring_prod = prod = rxr->rx_pg_prod;
47bf4246 4952 for (i = 0; i < bp->rx_pg_ring_size; i++) {
bb4f98ab 4953 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
47bf4246
MC
4954 break;
4955 prod = NEXT_RX_BD(prod);
4956 ring_prod = RX_PG_RING_IDX(prod);
4957 }
bb4f98ab 4958 rxr->rx_pg_prod = prod;
47bf4246 4959
bb4f98ab 4960 ring_prod = prod = rxr->rx_prod;
236b6394 4961 for (i = 0; i < bp->rx_ring_size; i++) {
bb4f98ab 4962 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
b6016b76 4963 break;
b6016b76
MC
4964 prod = NEXT_RX_BD(prod);
4965 ring_prod = RX_RING_IDX(prod);
4966 }
bb4f98ab 4967 rxr->rx_prod = prod;
b6016b76 4968
bb4f98ab
MC
4969 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4970 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4971 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
b6016b76 4972
bb4f98ab
MC
4973 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4974 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4975
4976 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
4977}
4978
35e9010b
MC
4979static void
4980bnx2_init_all_rings(struct bnx2 *bp)
4981{
4982 int i;
5e9ad9e1 4983 u32 val;
35e9010b
MC
4984
4985 bnx2_clear_ring_states(bp);
4986
4987 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4988 for (i = 0; i < bp->num_tx_rings; i++)
4989 bnx2_init_tx_ring(bp, i);
4990
4991 if (bp->num_tx_rings > 1)
4992 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4993 (TX_TSS_CID << 7));
4994
5e9ad9e1
MC
4995 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4996 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4997
bb4f98ab
MC
4998 for (i = 0; i < bp->num_rx_rings; i++)
4999 bnx2_init_rx_ring(bp, i);
5e9ad9e1
MC
5000
5001 if (bp->num_rx_rings > 1) {
5002 u32 tbl_32;
5003 u8 *tbl = (u8 *) &tbl_32;
5004
5005 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
5006 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
5007
5008 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
5009 tbl[i % 4] = i % (bp->num_rx_rings - 1);
5010 if ((i % 4) == 3)
5011 bnx2_reg_wr_ind(bp,
5012 BNX2_RXP_SCRATCH_RSS_TBL + i,
5013 cpu_to_be32(tbl_32));
5014 }
5015
5016 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
5017 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
5018
5019 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
5020
5021 }
35e9010b
MC
5022}
5023
5d5d0015 5024static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5025{
5d5d0015 5026 u32 max, num_rings = 1;
13daffa2 5027
5d5d0015
MC
5028 while (ring_size > MAX_RX_DESC_CNT) {
5029 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
5030 num_rings++;
5031 }
5032 /* round to next power of 2 */
5d5d0015 5033 max = max_size;
13daffa2
MC
5034 while ((max & num_rings) == 0)
5035 max >>= 1;
5036
5037 if (num_rings != max)
5038 max <<= 1;
5039
5d5d0015
MC
5040 return max;
5041}
5042
5043static void
5044bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5045{
84eaa187 5046 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
5047
5048 /* 8 for CRC and VLAN */
d89cb6af 5049 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5d5d0015 5050
84eaa187
MC
5051 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5052 sizeof(struct skb_shared_info);
5053
601d3d18 5054 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
47bf4246
MC
5055 bp->rx_pg_ring_size = 0;
5056 bp->rx_max_pg_ring = 0;
5057 bp->rx_max_pg_ring_idx = 0;
f86e82fb 5058 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
84eaa187
MC
5059 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5060
5061 jumbo_size = size * pages;
5062 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5063 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5064
5065 bp->rx_pg_ring_size = jumbo_size;
5066 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5067 MAX_RX_PG_RINGS);
5068 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
601d3d18 5069 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
84eaa187
MC
5070 bp->rx_copy_thresh = 0;
5071 }
5d5d0015
MC
5072
5073 bp->rx_buf_use_size = rx_size;
5074 /* hw alignment */
5075 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
d89cb6af 5076 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5d5d0015
MC
5077 bp->rx_ring_size = size;
5078 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
5079 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5080}
5081
b6016b76
MC
5082static void
5083bnx2_free_tx_skbs(struct bnx2 *bp)
5084{
5085 int i;
5086
35e9010b
MC
5087 for (i = 0; i < bp->num_tx_rings; i++) {
5088 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5089 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5090 int j;
b6016b76 5091
35e9010b 5092 if (txr->tx_buf_ring == NULL)
b6016b76 5093 continue;
b6016b76 5094
35e9010b 5095 for (j = 0; j < TX_DESC_CNT; ) {
3d16af86 5096 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
35e9010b 5097 struct sk_buff *skb = tx_buf->skb;
35e9010b
MC
5098
5099 if (skb == NULL) {
5100 j++;
5101 continue;
5102 }
5103
3d16af86 5104 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
b6016b76 5105
35e9010b 5106 tx_buf->skb = NULL;
b6016b76 5107
3d16af86 5108 j += skb_shinfo(skb)->nr_frags + 1;
35e9010b 5109 dev_kfree_skb(skb);
b6016b76 5110 }
b6016b76 5111 }
b6016b76
MC
5112}
5113
5114static void
5115bnx2_free_rx_skbs(struct bnx2 *bp)
5116{
5117 int i;
5118
bb4f98ab
MC
5119 for (i = 0; i < bp->num_rx_rings; i++) {
5120 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5121 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5122 int j;
b6016b76 5123
bb4f98ab
MC
5124 if (rxr->rx_buf_ring == NULL)
5125 return;
b6016b76 5126
bb4f98ab
MC
5127 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5128 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5129 struct sk_buff *skb = rx_buf->skb;
b6016b76 5130
bb4f98ab
MC
5131 if (skb == NULL)
5132 continue;
b6016b76 5133
bb4f98ab
MC
5134 pci_unmap_single(bp->pdev,
5135 pci_unmap_addr(rx_buf, mapping),
5136 bp->rx_buf_use_size,
5137 PCI_DMA_FROMDEVICE);
b6016b76 5138
bb4f98ab
MC
5139 rx_buf->skb = NULL;
5140
5141 dev_kfree_skb(skb);
5142 }
5143 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5144 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5145 }
5146}
5147
/* Drop every skb held by both the tx and rx rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5154
5155static int
5156bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5157{
5158 int rc;
5159
5160 rc = bnx2_reset_chip(bp, reset_code);
5161 bnx2_free_skbs(bp);
5162 if (rc)
5163 return rc;
5164
fba9fe91
MC
5165 if ((rc = bnx2_init_chip(bp)) != 0)
5166 return rc;
5167
35e9010b 5168 bnx2_init_all_rings(bp);
b6016b76
MC
5169 return 0;
5170}
5171
5172static int
9a120bc5 5173bnx2_init_nic(struct bnx2 *bp, int reset_phy)
b6016b76
MC
5174{
5175 int rc;
5176
5177 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5178 return rc;
5179
80be4434 5180 spin_lock_bh(&bp->phy_lock);
9a120bc5 5181 bnx2_init_phy(bp, reset_phy);
b6016b76 5182 bnx2_set_link(bp);
543a827d
MC
5183 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5184 bnx2_remote_phy_event(bp);
0d8a6571 5185 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5186 return 0;
5187}
5188
74bf4ba3
MC
5189static int
5190bnx2_shutdown_chip(struct bnx2 *bp)
5191{
5192 u32 reset_code;
5193
5194 if (bp->flags & BNX2_FLAG_NO_WOL)
5195 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5196 else if (bp->wol)
5197 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5198 else
5199 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5200
5201 return bnx2_reset_chip(bp, reset_code);
5202}
5203
b6016b76
MC
5204static int
5205bnx2_test_registers(struct bnx2 *bp)
5206{
5207 int ret;
5bae30c9 5208 int i, is_5709;
f71e1309 5209 static const struct {
b6016b76
MC
5210 u16 offset;
5211 u16 flags;
5bae30c9 5212#define BNX2_FL_NOT_5709 1
b6016b76
MC
5213 u32 rw_mask;
5214 u32 ro_mask;
5215 } reg_tbl[] = {
5216 { 0x006c, 0, 0x00000000, 0x0000003f },
5217 { 0x0090, 0, 0xffffffff, 0x00000000 },
5218 { 0x0094, 0, 0x00000000, 0x00000000 },
5219
5bae30c9
MC
5220 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5221 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5222 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5223 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5224 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5225 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5226 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5227 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5228 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5229
5230 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5231 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5232 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5233 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5234 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5235 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5236
5237 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5238 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5239 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
5240
5241 { 0x1000, 0, 0x00000000, 0x00000001 },
15b169cc 5242 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
b6016b76
MC
5243
5244 { 0x1408, 0, 0x01c00800, 0x00000000 },
5245 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5246 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 5247 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
5248 { 0x14b0, 0, 0x00000002, 0x00000001 },
5249 { 0x14b8, 0, 0x00000000, 0x00000000 },
5250 { 0x14c0, 0, 0x00000000, 0x00000009 },
5251 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5252 { 0x14cc, 0, 0x00000000, 0x00000001 },
5253 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
5254
5255 { 0x1800, 0, 0x00000000, 0x00000001 },
5256 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
5257
5258 { 0x2800, 0, 0x00000000, 0x00000001 },
5259 { 0x2804, 0, 0x00000000, 0x00003f01 },
5260 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5261 { 0x2810, 0, 0xffff0000, 0x00000000 },
5262 { 0x2814, 0, 0xffff0000, 0x00000000 },
5263 { 0x2818, 0, 0xffff0000, 0x00000000 },
5264 { 0x281c, 0, 0xffff0000, 0x00000000 },
5265 { 0x2834, 0, 0xffffffff, 0x00000000 },
5266 { 0x2840, 0, 0x00000000, 0xffffffff },
5267 { 0x2844, 0, 0x00000000, 0xffffffff },
5268 { 0x2848, 0, 0xffffffff, 0x00000000 },
5269 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5270
5271 { 0x2c00, 0, 0x00000000, 0x00000011 },
5272 { 0x2c04, 0, 0x00000000, 0x00030007 },
5273
b6016b76
MC
5274 { 0x3c00, 0, 0x00000000, 0x00000001 },
5275 { 0x3c04, 0, 0x00000000, 0x00070000 },
5276 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5277 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5278 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5279 { 0x3c14, 0, 0x00000000, 0xffffffff },
5280 { 0x3c18, 0, 0x00000000, 0xffffffff },
5281 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5282 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
5283
5284 { 0x5004, 0, 0x00000000, 0x0000007f },
5285 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 5286
b6016b76
MC
5287 { 0x5c00, 0, 0x00000000, 0x00000001 },
5288 { 0x5c04, 0, 0x00000000, 0x0003000f },
5289 { 0x5c08, 0, 0x00000003, 0x00000000 },
5290 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5291 { 0x5c10, 0, 0x00000000, 0xffffffff },
5292 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5293 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5294 { 0x5c88, 0, 0x00000000, 0x00077373 },
5295 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5296
5297 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5298 { 0x680c, 0, 0xffffffff, 0x00000000 },
5299 { 0x6810, 0, 0xffffffff, 0x00000000 },
5300 { 0x6814, 0, 0xffffffff, 0x00000000 },
5301 { 0x6818, 0, 0xffffffff, 0x00000000 },
5302 { 0x681c, 0, 0xffffffff, 0x00000000 },
5303 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5304 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5305 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5306 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5307 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5308 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5309 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5310 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5311 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5312 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5313 { 0x684c, 0, 0xffffffff, 0x00000000 },
5314 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5315 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5316 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5317 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5318 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5319 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5320
5321 { 0xffff, 0, 0x00000000, 0x00000000 },
5322 };
5323
5324 ret = 0;
5bae30c9
MC
5325 is_5709 = 0;
5326 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5327 is_5709 = 1;
5328
b6016b76
MC
5329 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5330 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
5331 u16 flags = reg_tbl[i].flags;
5332
5333 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5334 continue;
b6016b76
MC
5335
5336 offset = (u32) reg_tbl[i].offset;
5337 rw_mask = reg_tbl[i].rw_mask;
5338 ro_mask = reg_tbl[i].ro_mask;
5339
14ab9b86 5340 save_val = readl(bp->regview + offset);
b6016b76 5341
14ab9b86 5342 writel(0, bp->regview + offset);
b6016b76 5343
14ab9b86 5344 val = readl(bp->regview + offset);
b6016b76
MC
5345 if ((val & rw_mask) != 0) {
5346 goto reg_test_err;
5347 }
5348
5349 if ((val & ro_mask) != (save_val & ro_mask)) {
5350 goto reg_test_err;
5351 }
5352
14ab9b86 5353 writel(0xffffffff, bp->regview + offset);
b6016b76 5354
14ab9b86 5355 val = readl(bp->regview + offset);
b6016b76
MC
5356 if ((val & rw_mask) != rw_mask) {
5357 goto reg_test_err;
5358 }
5359
5360 if ((val & ro_mask) != (save_val & ro_mask)) {
5361 goto reg_test_err;
5362 }
5363
14ab9b86 5364 writel(save_val, bp->regview + offset);
b6016b76
MC
5365 continue;
5366
5367reg_test_err:
14ab9b86 5368 writel(save_val, bp->regview + offset);
b6016b76
MC
5369 ret = -ENODEV;
5370 break;
5371 }
5372 return ret;
5373}
5374
5375static int
5376bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5377{
f71e1309 5378 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5379 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5380 int i;
5381
5382 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5383 u32 offset;
5384
5385 for (offset = 0; offset < size; offset += 4) {
5386
2726d6e1 5387 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5388
2726d6e1 5389 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5390 test_pattern[i]) {
5391 return -ENODEV;
5392 }
5393 }
5394 }
5395 return 0;
5396}
5397
5398static int
5399bnx2_test_memory(struct bnx2 *bp)
5400{
5401 int ret = 0;
5402 int i;
5bae30c9 5403 static struct mem_entry {
b6016b76
MC
5404 u32 offset;
5405 u32 len;
5bae30c9 5406 } mem_tbl_5706[] = {
b6016b76 5407 { 0x60000, 0x4000 },
5b0c76ad 5408 { 0xa0000, 0x3000 },
b6016b76
MC
5409 { 0xe0000, 0x4000 },
5410 { 0x120000, 0x4000 },
5411 { 0x1a0000, 0x4000 },
5412 { 0x160000, 0x4000 },
5413 { 0xffffffff, 0 },
5bae30c9
MC
5414 },
5415 mem_tbl_5709[] = {
5416 { 0x60000, 0x4000 },
5417 { 0xa0000, 0x3000 },
5418 { 0xe0000, 0x4000 },
5419 { 0x120000, 0x4000 },
5420 { 0x1a0000, 0x4000 },
5421 { 0xffffffff, 0 },
b6016b76 5422 };
5bae30c9
MC
5423 struct mem_entry *mem_tbl;
5424
5425 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5426 mem_tbl = mem_tbl_5709;
5427 else
5428 mem_tbl = mem_tbl_5706;
b6016b76
MC
5429
5430 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5431 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5432 mem_tbl[i].len)) != 0) {
5433 return ret;
5434 }
5435 }
6aa20a22 5436
b6016b76
MC
5437 return ret;
5438}
5439
bc5a0690
MC
5440#define BNX2_MAC_LOOPBACK 0
5441#define BNX2_PHY_LOOPBACK 1
5442
b6016b76 5443static int
bc5a0690 5444bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
5445{
5446 unsigned int pkt_size, num_pkts, i;
5447 struct sk_buff *skb, *rx_skb;
5448 unsigned char *packet;
bc5a0690 5449 u16 rx_start_idx, rx_idx;
b6016b76
MC
5450 dma_addr_t map;
5451 struct tx_bd *txbd;
5452 struct sw_bd *rx_buf;
5453 struct l2_fhdr *rx_hdr;
5454 int ret = -ENODEV;
c76c0475 5455 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
35e9010b 5456 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 5457 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
c76c0475
MC
5458
5459 tx_napi = bnapi;
b6016b76 5460
35e9010b 5461 txr = &tx_napi->tx_ring;
bb4f98ab 5462 rxr = &bnapi->rx_ring;
bc5a0690
MC
5463 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5464 bp->loopback = MAC_LOOPBACK;
5465 bnx2_set_mac_loopback(bp);
5466 }
5467 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
583c28e5 5468 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
489310a4
MC
5469 return 0;
5470
80be4434 5471 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
5472 bnx2_set_phy_loopback(bp);
5473 }
5474 else
5475 return -EINVAL;
b6016b76 5476
84eaa187 5477 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 5478 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
5479 if (!skb)
5480 return -ENOMEM;
b6016b76 5481 packet = skb_put(skb, pkt_size);
6634292b 5482 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
5483 memset(packet + 6, 0x0, 8);
5484 for (i = 14; i < pkt_size; i++)
5485 packet[i] = (unsigned char) (i & 0xff);
5486
3d16af86
BL
5487 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5488 dev_kfree_skb(skb);
5489 return -EIO;
5490 }
5491 map = skb_shinfo(skb)->dma_maps[0];
b6016b76 5492
bf5295bb
MC
5493 REG_WR(bp, BNX2_HC_COMMAND,
5494 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5495
b6016b76
MC
5496 REG_RD(bp, BNX2_HC_COMMAND);
5497
5498 udelay(5);
35efa7c1 5499 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 5500
b6016b76
MC
5501 num_pkts = 0;
5502
35e9010b 5503 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
b6016b76
MC
5504
5505 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5506 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5507 txbd->tx_bd_mss_nbytes = pkt_size;
5508 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5509
5510 num_pkts++;
35e9010b
MC
5511 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5512 txr->tx_prod_bseq += pkt_size;
b6016b76 5513
35e9010b
MC
5514 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5515 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
5516
5517 udelay(100);
5518
bf5295bb
MC
5519 REG_WR(bp, BNX2_HC_COMMAND,
5520 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5521
b6016b76
MC
5522 REG_RD(bp, BNX2_HC_COMMAND);
5523
5524 udelay(5);
5525
3d16af86 5526 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
745720e5 5527 dev_kfree_skb(skb);
b6016b76 5528
35e9010b 5529 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
b6016b76 5530 goto loopback_test_done;
b6016b76 5531
35efa7c1 5532 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
5533 if (rx_idx != rx_start_idx + num_pkts) {
5534 goto loopback_test_done;
5535 }
5536
bb4f98ab 5537 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
b6016b76
MC
5538 rx_skb = rx_buf->skb;
5539
5540 rx_hdr = (struct l2_fhdr *) rx_skb->data;
d89cb6af 5541 skb_reserve(rx_skb, BNX2_RX_OFFSET);
b6016b76
MC
5542
5543 pci_dma_sync_single_for_cpu(bp->pdev,
5544 pci_unmap_addr(rx_buf, mapping),
5545 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5546
ade2bfe7 5547 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5548 (L2_FHDR_ERRORS_BAD_CRC |
5549 L2_FHDR_ERRORS_PHY_DECODE |
5550 L2_FHDR_ERRORS_ALIGNMENT |
5551 L2_FHDR_ERRORS_TOO_SHORT |
5552 L2_FHDR_ERRORS_GIANT_FRAME)) {
5553
5554 goto loopback_test_done;
5555 }
5556
5557 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5558 goto loopback_test_done;
5559 }
5560
5561 for (i = 14; i < pkt_size; i++) {
5562 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5563 goto loopback_test_done;
5564 }
5565 }
5566
5567 ret = 0;
5568
5569loopback_test_done:
5570 bp->loopback = 0;
5571 return ret;
5572}
5573
bc5a0690
MC
5574#define BNX2_MAC_LOOPBACK_FAILED 1
5575#define BNX2_PHY_LOOPBACK_FAILED 2
5576#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5577 BNX2_PHY_LOOPBACK_FAILED)
5578
5579static int
5580bnx2_test_loopback(struct bnx2 *bp)
5581{
5582 int rc = 0;
5583
5584 if (!netif_running(bp->dev))
5585 return BNX2_LOOPBACK_FAILED;
5586
5587 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5588 spin_lock_bh(&bp->phy_lock);
9a120bc5 5589 bnx2_init_phy(bp, 1);
bc5a0690
MC
5590 spin_unlock_bh(&bp->phy_lock);
5591 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5592 rc |= BNX2_MAC_LOOPBACK_FAILED;
5593 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5594 rc |= BNX2_PHY_LOOPBACK_FAILED;
5595 return rc;
5596}
5597
b6016b76
MC
5598#define NVRAM_SIZE 0x200
5599#define CRC32_RESIDUAL 0xdebb20e3
5600
5601static int
5602bnx2_test_nvram(struct bnx2 *bp)
5603{
b491edd5 5604 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5605 u8 *data = (u8 *) buf;
5606 int rc = 0;
5607 u32 magic, csum;
5608
5609 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5610 goto test_nvram_done;
5611
5612 magic = be32_to_cpu(buf[0]);
5613 if (magic != 0x669955aa) {
5614 rc = -ENODEV;
5615 goto test_nvram_done;
5616 }
5617
5618 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5619 goto test_nvram_done;
5620
5621 csum = ether_crc_le(0x100, data);
5622 if (csum != CRC32_RESIDUAL) {
5623 rc = -ENODEV;
5624 goto test_nvram_done;
5625 }
5626
5627 csum = ether_crc_le(0x100, data + 0x100);
5628 if (csum != CRC32_RESIDUAL) {
5629 rc = -ENODEV;
5630 }
5631
5632test_nvram_done:
5633 return rc;
5634}
5635
5636static int
5637bnx2_test_link(struct bnx2 *bp)
5638{
5639 u32 bmsr;
5640
9f52b564
MC
5641 if (!netif_running(bp->dev))
5642 return -ENODEV;
5643
583c28e5 5644 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5645 if (bp->link_up)
5646 return 0;
5647 return -ENODEV;
5648 }
c770a65c 5649 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5650 bnx2_enable_bmsr1(bp);
5651 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5652 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5653 bnx2_disable_bmsr1(bp);
c770a65c 5654 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5655
b6016b76
MC
5656 if (bmsr & BMSR_LSTATUS) {
5657 return 0;
5658 }
5659 return -ENODEV;
5660}
5661
5662static int
5663bnx2_test_intr(struct bnx2 *bp)
5664{
5665 int i;
b6016b76
MC
5666 u16 status_idx;
5667
5668 if (!netif_running(bp->dev))
5669 return -ENODEV;
5670
5671 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5672
5673 /* This register is not touched during run-time. */
bf5295bb 5674 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5675 REG_RD(bp, BNX2_HC_COMMAND);
5676
5677 for (i = 0; i < 10; i++) {
5678 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5679 status_idx) {
5680
5681 break;
5682 }
5683
5684 msleep_interruptible(10);
5685 }
5686 if (i < 10)
5687 return 0;
5688
5689 return -ENODEV;
5690}
5691
38ea3686 5692/* Determining link for parallel detection. */
b2fadeae
MC
5693static int
5694bnx2_5706_serdes_has_link(struct bnx2 *bp)
5695{
5696 u32 mode_ctl, an_dbg, exp;
5697
38ea3686
MC
5698 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5699 return 0;
5700
b2fadeae
MC
5701 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5702 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5703
5704 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5705 return 0;
5706
5707 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5708 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5709 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5710
f3014c0c 5711 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
b2fadeae
MC
5712 return 0;
5713
5714 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5715 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5716 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5717
5718 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5719 return 0;
5720
5721 return 1;
5722}
5723
b6016b76 5724static void
48b01e2d 5725bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5726{
b2fadeae
MC
5727 int check_link = 1;
5728
48b01e2d 5729 spin_lock(&bp->phy_lock);
b2fadeae 5730 if (bp->serdes_an_pending) {
48b01e2d 5731 bp->serdes_an_pending--;
b2fadeae
MC
5732 check_link = 0;
5733 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
48b01e2d 5734 u32 bmcr;
b6016b76 5735
ac392abc 5736 bp->current_interval = BNX2_TIMER_INTERVAL;
cd339a0e 5737
ca58c3af 5738 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5739
48b01e2d 5740 if (bmcr & BMCR_ANENABLE) {
b2fadeae 5741 if (bnx2_5706_serdes_has_link(bp)) {
48b01e2d
MC
5742 bmcr &= ~BMCR_ANENABLE;
5743 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5744 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
583c28e5 5745 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d 5746 }
b6016b76 5747 }
48b01e2d
MC
5748 }
5749 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
583c28e5 5750 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
48b01e2d 5751 u32 phy2;
b6016b76 5752
48b01e2d
MC
5753 bnx2_write_phy(bp, 0x17, 0x0f01);
5754 bnx2_read_phy(bp, 0x15, &phy2);
5755 if (phy2 & 0x20) {
5756 u32 bmcr;
cd339a0e 5757
ca58c3af 5758 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5759 bmcr |= BMCR_ANENABLE;
ca58c3af 5760 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5761
583c28e5 5762 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d
MC
5763 }
5764 } else
ac392abc 5765 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5766
a2724e25 5767 if (check_link) {
b2fadeae
MC
5768 u32 val;
5769
5770 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5771 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5772 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5773
a2724e25
MC
5774 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5775 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5776 bnx2_5706s_force_link_dn(bp, 1);
5777 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5778 } else
5779 bnx2_set_link(bp);
5780 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5781 bnx2_set_link(bp);
b2fadeae 5782 }
48b01e2d
MC
5783 spin_unlock(&bp->phy_lock);
5784}
b6016b76 5785
f8dd064e
MC
5786static void
5787bnx2_5708_serdes_timer(struct bnx2 *bp)
5788{
583c28e5 5789 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
5790 return;
5791
583c28e5 5792 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
f8dd064e
MC
5793 bp->serdes_an_pending = 0;
5794 return;
5795 }
b6016b76 5796
f8dd064e
MC
5797 spin_lock(&bp->phy_lock);
5798 if (bp->serdes_an_pending)
5799 bp->serdes_an_pending--;
5800 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5801 u32 bmcr;
b6016b76 5802
ca58c3af 5803 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 5804 if (bmcr & BMCR_ANENABLE) {
605a9e20 5805 bnx2_enable_forced_2g5(bp);
40105c0b 5806 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
f8dd064e 5807 } else {
605a9e20 5808 bnx2_disable_forced_2g5(bp);
f8dd064e 5809 bp->serdes_an_pending = 2;
ac392abc 5810 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5811 }
b6016b76 5812
f8dd064e 5813 } else
ac392abc 5814 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5815
f8dd064e
MC
5816 spin_unlock(&bp->phy_lock);
5817}
5818
48b01e2d
MC
5819static void
5820bnx2_timer(unsigned long data)
5821{
5822 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 5823
48b01e2d
MC
5824 if (!netif_running(bp->dev))
5825 return;
b6016b76 5826
48b01e2d
MC
5827 if (atomic_read(&bp->intr_sem) != 0)
5828 goto bnx2_restart_timer;
b6016b76 5829
efba0180
MC
5830 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5831 BNX2_FLAG_USING_MSI)
5832 bnx2_chk_missed_msi(bp);
5833
df149d70 5834 bnx2_send_heart_beat(bp);
b6016b76 5835
2726d6e1
MC
5836 bp->stats_blk->stat_FwRxDrop =
5837 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 5838
02537b06
MC
5839 /* workaround occasional corrupted counters */
5840 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5841 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5842 BNX2_HC_COMMAND_STATS_NOW);
5843
583c28e5 5844 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
f8dd064e
MC
5845 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5846 bnx2_5706_serdes_timer(bp);
27a005b8 5847 else
f8dd064e 5848 bnx2_5708_serdes_timer(bp);
b6016b76
MC
5849 }
5850
5851bnx2_restart_timer:
cd339a0e 5852 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5853}
5854
8e6a72c4
MC
5855static int
5856bnx2_request_irq(struct bnx2 *bp)
5857{
6d866ffc 5858 unsigned long flags;
b4b36042
MC
5859 struct bnx2_irq *irq;
5860 int rc = 0, i;
8e6a72c4 5861
f86e82fb 5862 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
5863 flags = 0;
5864 else
5865 flags = IRQF_SHARED;
b4b36042
MC
5866
5867 for (i = 0; i < bp->irq_nvecs; i++) {
5868 irq = &bp->irq_tbl[i];
c76c0475 5869 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 5870 &bp->bnx2_napi[i]);
b4b36042
MC
5871 if (rc)
5872 break;
5873 irq->requested = 1;
5874 }
8e6a72c4
MC
5875 return rc;
5876}
5877
5878static void
5879bnx2_free_irq(struct bnx2 *bp)
5880{
b4b36042
MC
5881 struct bnx2_irq *irq;
5882 int i;
8e6a72c4 5883
b4b36042
MC
5884 for (i = 0; i < bp->irq_nvecs; i++) {
5885 irq = &bp->irq_tbl[i];
5886 if (irq->requested)
f0ea2e63 5887 free_irq(irq->vector, &bp->bnx2_napi[i]);
b4b36042 5888 irq->requested = 0;
6d866ffc 5889 }
f86e82fb 5890 if (bp->flags & BNX2_FLAG_USING_MSI)
b4b36042 5891 pci_disable_msi(bp->pdev);
f86e82fb 5892 else if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
5893 pci_disable_msix(bp->pdev);
5894
f86e82fb 5895 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
b4b36042
MC
5896}
5897
5898static void
5e9ad9e1 5899bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
b4b36042 5900{
57851d84
MC
5901 int i, rc;
5902 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
4e1d0de9
MC
5903 struct net_device *dev = bp->dev;
5904 const int len = sizeof(bp->irq_tbl[0].name);
57851d84 5905
b4b36042
MC
5906 bnx2_setup_msix_tbl(bp);
5907 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5908 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5909 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
57851d84
MC
5910
5911 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5912 msix_ent[i].entry = i;
5913 msix_ent[i].vector = 0;
5914 }
5915
5916 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5917 if (rc != 0)
5918 return;
5919
5e9ad9e1 5920 bp->irq_nvecs = msix_vecs;
f86e82fb 5921 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
69010313 5922 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
57851d84 5923 bp->irq_tbl[i].vector = msix_ent[i].vector;
69010313
MC
5924 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5925 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5926 }
6d866ffc
MC
5927}
5928
5929static void
5930bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5931{
5e9ad9e1 5932 int cpus = num_online_cpus();
706bf240 5933 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5e9ad9e1 5934
6d866ffc
MC
5935 bp->irq_tbl[0].handler = bnx2_interrupt;
5936 strcpy(bp->irq_tbl[0].name, bp->dev->name);
b4b36042
MC
5937 bp->irq_nvecs = 1;
5938 bp->irq_tbl[0].vector = bp->pdev->irq;
5939
5e9ad9e1
MC
5940 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5941 bnx2_enable_msix(bp, msix_vecs);
6d866ffc 5942
f86e82fb
DM
5943 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5944 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6d866ffc 5945 if (pci_enable_msi(bp->pdev) == 0) {
f86e82fb 5946 bp->flags |= BNX2_FLAG_USING_MSI;
6d866ffc 5947 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
f86e82fb 5948 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6d866ffc
MC
5949 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5950 } else
5951 bp->irq_tbl[0].handler = bnx2_msi;
b4b36042
MC
5952
5953 bp->irq_tbl[0].vector = bp->pdev->irq;
6d866ffc
MC
5954 }
5955 }
706bf240
BL
5956
5957 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5958 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5959
5e9ad9e1 5960 bp->num_rx_rings = bp->irq_nvecs;
8e6a72c4
MC
5961}
5962
b6016b76
MC
5963/* Called with rtnl_lock */
5964static int
5965bnx2_open(struct net_device *dev)
5966{
972ec0d4 5967 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5968 int rc;
5969
1b2f922f
MC
5970 netif_carrier_off(dev);
5971
829ca9a3 5972 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5973 bnx2_disable_int(bp);
5974
35e9010b
MC
5975 bnx2_setup_int_mode(bp, disable_msi);
5976 bnx2_napi_enable(bp);
b6016b76 5977 rc = bnx2_alloc_mem(bp);
2739a8bb
MC
5978 if (rc)
5979 goto open_err;
b6016b76 5980
8e6a72c4 5981 rc = bnx2_request_irq(bp);
2739a8bb
MC
5982 if (rc)
5983 goto open_err;
b6016b76 5984
9a120bc5 5985 rc = bnx2_init_nic(bp, 1);
2739a8bb
MC
5986 if (rc)
5987 goto open_err;
6aa20a22 5988
cd339a0e 5989 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5990
5991 atomic_set(&bp->intr_sem, 0);
5992
5993 bnx2_enable_int(bp);
5994
f86e82fb 5995 if (bp->flags & BNX2_FLAG_USING_MSI) {
b6016b76
MC
5996 /* Test MSI to make sure it is working
5997 * If MSI test fails, go back to INTx mode
5998 */
5999 if (bnx2_test_intr(bp) != 0) {
6000 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6001 " using MSI, switching to INTx mode. Please"
6002 " report this failure to the PCI maintainer"
6003 " and include system chipset information.\n",
6004 bp->dev->name);
6005
6006 bnx2_disable_int(bp);
8e6a72c4 6007 bnx2_free_irq(bp);
b6016b76 6008
6d866ffc
MC
6009 bnx2_setup_int_mode(bp, 1);
6010
9a120bc5 6011 rc = bnx2_init_nic(bp, 0);
b6016b76 6012
8e6a72c4
MC
6013 if (!rc)
6014 rc = bnx2_request_irq(bp);
6015
b6016b76 6016 if (rc) {
b6016b76 6017 del_timer_sync(&bp->timer);
2739a8bb 6018 goto open_err;
b6016b76
MC
6019 }
6020 bnx2_enable_int(bp);
6021 }
6022 }
f86e82fb 6023 if (bp->flags & BNX2_FLAG_USING_MSI)
b6016b76 6024 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
f86e82fb 6025 else if (bp->flags & BNX2_FLAG_USING_MSIX)
57851d84 6026 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
b6016b76 6027
706bf240 6028 netif_tx_start_all_queues(dev);
b6016b76
MC
6029
6030 return 0;
2739a8bb
MC
6031
6032open_err:
6033 bnx2_napi_disable(bp);
6034 bnx2_free_skbs(bp);
6035 bnx2_free_irq(bp);
6036 bnx2_free_mem(bp);
6037 return rc;
b6016b76
MC
6038}
6039
6040static void
c4028958 6041bnx2_reset_task(struct work_struct *work)
b6016b76 6042{
c4028958 6043 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 6044
afdc08b9
MC
6045 if (!netif_running(bp->dev))
6046 return;
6047
b6016b76
MC
6048 bnx2_netif_stop(bp);
6049
9a120bc5 6050 bnx2_init_nic(bp, 1);
b6016b76
MC
6051
6052 atomic_set(&bp->intr_sem, 1);
6053 bnx2_netif_start(bp);
6054}
6055
6056static void
6057bnx2_tx_timeout(struct net_device *dev)
6058{
972ec0d4 6059 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6060
6061 /* This allows the netif to be shutdown gracefully before resetting */
6062 schedule_work(&bp->reset_task);
6063}
6064
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Install the VLAN group: quiesce the device, record the group, rebuild
 * the RX filters, and (when the firmware allows VLAN stripping while it
 * manages the port) notify it of the change.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6082
932ff279 6083/* Called with netif_tx_lock.
2f8af120
MC
6084 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6085 * netif_wake_queue().
b6016b76
MC
6086 */
6087static int
6088bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6089{
972ec0d4 6090 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6091 dma_addr_t mapping;
6092 struct tx_bd *txbd;
3d16af86 6093 struct sw_tx_bd *tx_buf;
b6016b76
MC
6094 u32 len, vlan_tag_flags, last_frag, mss;
6095 u16 prod, ring_prod;
6096 int i;
706bf240
BL
6097 struct bnx2_napi *bnapi;
6098 struct bnx2_tx_ring_info *txr;
6099 struct netdev_queue *txq;
3d16af86 6100 struct skb_shared_info *sp;
706bf240
BL
6101
6102 /* Determine which tx ring we will be placed on */
6103 i = skb_get_queue_mapping(skb);
6104 bnapi = &bp->bnx2_napi[i];
6105 txr = &bnapi->tx_ring;
6106 txq = netdev_get_tx_queue(dev, i);
b6016b76 6107
35e9010b 6108 if (unlikely(bnx2_tx_avail(bp, txr) <
a550c99b 6109 (skb_shinfo(skb)->nr_frags + 1))) {
706bf240 6110 netif_tx_stop_queue(txq);
b6016b76
MC
6111 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6112 dev->name);
6113
6114 return NETDEV_TX_BUSY;
6115 }
6116 len = skb_headlen(skb);
35e9010b 6117 prod = txr->tx_prod;
b6016b76
MC
6118 ring_prod = TX_RING_IDX(prod);
6119
6120 vlan_tag_flags = 0;
84fa7933 6121 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
6122 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6123 }
6124
729b85cd 6125#ifdef BCM_VLAN
79ea13ce 6126 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
b6016b76
MC
6127 vlan_tag_flags |=
6128 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6129 }
729b85cd 6130#endif
fde82055 6131 if ((mss = skb_shinfo(skb)->gso_size)) {
a1efb4b6 6132 u32 tcp_opt_len;
eddc9ec5 6133 struct iphdr *iph;
b6016b76 6134
b6016b76
MC
6135 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6136
4666f87a
MC
6137 tcp_opt_len = tcp_optlen(skb);
6138
6139 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6140 u32 tcp_off = skb_transport_offset(skb) -
6141 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 6142
4666f87a
MC
6143 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6144 TX_BD_FLAGS_SW_FLAGS;
6145 if (likely(tcp_off == 0))
6146 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6147 else {
6148 tcp_off >>= 3;
6149 vlan_tag_flags |= ((tcp_off & 0x3) <<
6150 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6151 ((tcp_off & 0x10) <<
6152 TX_BD_FLAGS_TCP6_OFF4_SHL);
6153 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6154 }
6155 } else {
4666f87a 6156 iph = ip_hdr(skb);
4666f87a
MC
6157 if (tcp_opt_len || (iph->ihl > 5)) {
6158 vlan_tag_flags |= ((iph->ihl - 5) +
6159 (tcp_opt_len >> 2)) << 8;
6160 }
b6016b76 6161 }
4666f87a 6162 } else
b6016b76 6163 mss = 0;
b6016b76 6164
3d16af86
BL
6165 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6166 dev_kfree_skb(skb);
6167 return NETDEV_TX_OK;
6168 }
6169
6170 sp = skb_shinfo(skb);
6171 mapping = sp->dma_maps[0];
6aa20a22 6172
35e9010b 6173 tx_buf = &txr->tx_buf_ring[ring_prod];
b6016b76 6174 tx_buf->skb = skb;
b6016b76 6175
35e9010b 6176 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6177
6178 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6179 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6180 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6181 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6182
6183 last_frag = skb_shinfo(skb)->nr_frags;
d62fda08
ED
6184 tx_buf->nr_frags = last_frag;
6185 tx_buf->is_gso = skb_is_gso(skb);
b6016b76
MC
6186
6187 for (i = 0; i < last_frag; i++) {
6188 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6189
6190 prod = NEXT_TX_BD(prod);
6191 ring_prod = TX_RING_IDX(prod);
35e9010b 6192 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6193
6194 len = frag->size;
3d16af86 6195 mapping = sp->dma_maps[i + 1];
b6016b76
MC
6196
6197 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6198 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6199 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6200 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6201
6202 }
6203 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6204
6205 prod = NEXT_TX_BD(prod);
35e9010b 6206 txr->tx_prod_bseq += skb->len;
b6016b76 6207
35e9010b
MC
6208 REG_WR16(bp, txr->tx_bidx_addr, prod);
6209 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
6210
6211 mmiowb();
6212
35e9010b 6213 txr->tx_prod = prod;
b6016b76 6214
35e9010b 6215 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
706bf240 6216 netif_tx_stop_queue(txq);
35e9010b 6217 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
706bf240 6218 netif_tx_wake_queue(txq);
b6016b76
MC
6219 }
6220
6221 return NETDEV_TX_OK;
6222}
6223
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no reset_task is still queued or running before we
	 * start tearing the device down.
	 */
	cancel_work_sync(&bp->reset_task);

	/* Quiesce in order: interrupts, NAPI polling, the driver timer,
	 * then the chip itself, and only then release the IRQ.
	 */
	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	/* Free rx/tx buffers and the descriptor ring memory. */
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Drop the chip into low power (D3hot, WoL permitting). */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6244
/* Read a 64-bit hardware statistics counter exposed by the chip as a
 * _hi/_lo register pair.  The full expansion is parenthesized so the
 * macro behaves as a single expression under any surrounding operator
 * (the old form was an unparenthesized `a + b`, which misparsed in
 * contexts such as `2 * GET_NET_STATS64(x)`).  Only meaningful where
 * unsigned long is 64 bits wide.
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits of each counter are reported. */
#define GET_NET_STATS32(ctr)		\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS	GET_NET_STATS64
#else
#define GET_NET_STATS	GET_NET_STATS32
#endif
6257
/* Return the standard netdev statistics, translated from the chip's
 * DMA'd statistics block.  Counters are folded with GET_NET_STATS so
 * 64-bit hosts see the full _hi/_lo pair and 32-bit hosts the low word.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	/* Stats block not allocated yet (device never opened). */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	/* rx_errors is the aggregate of the individual rx error classes. */
	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense errors are unreliable on 5706 and 5708 A0 and are
	 * reported as zero there (presumably a chip erratum - the same
	 * counters are also skipped in bnx2_5706_stats_len_arr below).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames the firmware dropped as well as MBUF discards. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6333
6334/* All ethtool functions called with rtnl_lock */
6335
/* ethtool get_settings: report supported modes, current advertising,
 * autoneg state and (if link is up) speed/duplex.  Called with rtnl_lock.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* With a remote-PHY-capable bootcode both media are possible;
	 * otherwise the probed port type decides.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link fields against concurrent updates
	 * from the link-state machinery.
	 */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* No link: speed/duplex unknown. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 6394
b6016b76
MC
/* ethtool set_settings: validate the requested port/autoneg/speed/duplex
 * combination, then commit it and reprogram the PHY if the device is up.
 * All validation failures funnel through err_out_unlock so the phy_lock
 * is always released.  Called with rtnl_lock.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	/* Work on local copies; bp is only updated once everything has
	 * been validated.
	 */
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Switching media is only possible with remote-PHY capability. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G needs a capable PHY and a fibre port. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single mode requested: advertise everything
			 * the selected medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			/* Fibre supports only 1G/2.5G full duplex. */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		/* Copper cannot be forced to gigabit speeds. */
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6489
/* ethtool get_drvinfo: fill in driver name/version, bus address and the
 * bootcode firmware version cached at probe time.
 * NOTE(review): plain strcpy into the fixed-size ethtool_drvinfo fields
 * assumes the sources fit - true for the module constants; verify
 * bp->fw_version is bounded accordingly.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	strcpy(info->fw_version, bp->fw_version);
}
6500
244ac4f4
MC
/* Size of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: fixed-size dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6508
6509static void
6510bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6511{
6512 u32 *p = _p, i, offset;
6513 u8 *orig_p = _p;
6514 struct bnx2 *bp = netdev_priv(dev);
6515 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6516 0x0800, 0x0880, 0x0c00, 0x0c10,
6517 0x0c30, 0x0d08, 0x1000, 0x101c,
6518 0x1040, 0x1048, 0x1080, 0x10a4,
6519 0x1400, 0x1490, 0x1498, 0x14f0,
6520 0x1500, 0x155c, 0x1580, 0x15dc,
6521 0x1600, 0x1658, 0x1680, 0x16d8,
6522 0x1800, 0x1820, 0x1840, 0x1854,
6523 0x1880, 0x1894, 0x1900, 0x1984,
6524 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6525 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6526 0x2000, 0x2030, 0x23c0, 0x2400,
6527 0x2800, 0x2820, 0x2830, 0x2850,
6528 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6529 0x3c00, 0x3c94, 0x4000, 0x4010,
6530 0x4080, 0x4090, 0x43c0, 0x4458,
6531 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6532 0x4fc0, 0x5010, 0x53c0, 0x5444,
6533 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6534 0x5fc0, 0x6000, 0x6400, 0x6428,
6535 0x6800, 0x6848, 0x684c, 0x6860,
6536 0x6888, 0x6910, 0x8000 };
6537
6538 regs->version = 0;
6539
6540 memset(p, 0, BNX2_REGDUMP_LEN);
6541
6542 if (!netif_running(bp->dev))
6543 return;
6544
6545 i = 0;
6546 offset = reg_boundaries[0];
6547 p += offset;
6548 while (offset < BNX2_REGDUMP_LEN) {
6549 *p++ = REG_RD(bp, offset);
6550 offset += 4;
6551 if (offset == reg_boundaries[i + 1]) {
6552 offset = reg_boundaries[i + 2];
6553 p = (u32 *) (orig_p + offset);
6554 i += 2;
6555 }
6556 }
6557}
6558
b6016b76
MC
6559static void
6560bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6561{
972ec0d4 6562 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6563
f86e82fb 6564 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6565 wol->supported = 0;
6566 wol->wolopts = 0;
6567 }
6568 else {
6569 wol->supported = WAKE_MAGIC;
6570 if (bp->wol)
6571 wol->wolopts = WAKE_MAGIC;
6572 else
6573 wol->wolopts = 0;
6574 }
6575 memset(&wol->sopass, 0, sizeof(wol->sopass));
6576}
6577
6578static int
6579bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6580{
972ec0d4 6581 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6582
6583 if (wol->wolopts & ~WAKE_MAGIC)
6584 return -EINVAL;
6585
6586 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6587 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6588 return -EINVAL;
6589
6590 bp->wol = 1;
6591 }
6592 else {
6593 bp->wol = 0;
6594 }
6595 return 0;
6596}
6597
/* ethtool nway_reset: restart autonegotiation.  For SerDes links a
 * deliberate link-down pulse is generated first so the peer notices.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	/* The PHY is inaccessible while the device is down. */
	if (!netif_running(dev))
		return -EAGAIN;

	/* Restarting makes no sense with a forced speed. */
	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY setups delegate the restart to the bootcode. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep may not be called
		 * with a spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the SerDes autoneg timeout handling in the timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6643
6644static int
6645bnx2_get_eeprom_len(struct net_device *dev)
6646{
972ec0d4 6647 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6648
1122db71 6649 if (bp->flash_info == NULL)
b6016b76
MC
6650 return 0;
6651
1122db71 6652 return (int) bp->flash_size;
b6016b76
MC
6653}
6654
6655static int
6656bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6657 u8 *eebuf)
6658{
972ec0d4 6659 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6660 int rc;
6661
9f52b564
MC
6662 if (!netif_running(dev))
6663 return -EAGAIN;
6664
1064e944 6665 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6666
6667 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6668
6669 return rc;
6670}
6671
6672static int
6673bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6674 u8 *eebuf)
6675{
972ec0d4 6676 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6677 int rc;
6678
9f52b564
MC
6679 if (!netif_running(dev))
6680 return -EAGAIN;
6681
1064e944 6682 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6683
6684 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6685
6686 return rc;
6687}
6688
/* ethtool get_coalesce: report the current interrupt coalescing
 * parameters (tick counts and quick-consumer-index trip thresholds).
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6710
/* ethtool set_coalesce: clamp the requested values to the hardware
 * field widths (tick registers are 10 bits, trip counts 8 bits) and
 * re-initialize the NIC if it is running so they take effect.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* 5708 only supports stats updates at 0 or once per second -
	 * presumably a chip limitation; see CHIP_NUM_5708 handling.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart the NIC so the host coalescing block picks up the new
	 * parameters.
	 */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6759
/* ethtool get_ringparam: report current and maximum rx/tx ring sizes.
 * The "jumbo" ring maps to the driver's rx page ring.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
6776
/* Resize the rx/tx rings.  If the device is running it is quiesced,
 * ring memory is reallocated at the new sizes, and the NIC is restarted.
 * NOTE(review): if bnx2_alloc_mem() fails the device is left stopped
 * with its memory freed while still marked running - verify callers
 * handle this (later mainline closes the device on this path).
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}
	return 0;
}
6801
5d5d0015
MC
6802static int
6803bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6804{
6805 struct bnx2 *bp = netdev_priv(dev);
6806 int rc;
6807
6808 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6809 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6810 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6811
6812 return -EINVAL;
6813 }
6814 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6815 return rc;
6816}
6817
b6016b76
MC
6818static void
6819bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6820{
972ec0d4 6821 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6822
6823 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6824 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6825 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6826}
6827
6828static int
6829bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6830{
972ec0d4 6831 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6832
6833 bp->req_flow_ctrl = 0;
6834 if (epause->rx_pause)
6835 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6836 if (epause->tx_pause)
6837 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6838
6839 if (epause->autoneg) {
6840 bp->autoneg |= AUTONEG_FLOW_CTRL;
6841 }
6842 else {
6843 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6844 }
6845
9f52b564
MC
6846 if (netif_running(dev)) {
6847 spin_lock_bh(&bp->phy_lock);
6848 bnx2_setup_phy(bp, bp->phy_port);
6849 spin_unlock_bh(&bp->phy_lock);
6850 }
b6016b76
MC
6851
6852 return 0;
6853}
6854
/* ethtool get_rx_csum: report whether rx checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6862
/* ethtool set_rx_csum: enable/disable rx checksum offload.  The flag is
 * consulted on the rx path; no hardware reprogramming is needed here.
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6871
b11d6213
MC
/* ethtool set_tso: toggle TSO feature flags.  TSO6 is offered only on
 * the 5709, which can segment IPv6 in hardware.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
6886
/* Number of entries reported through ETH_SS_STATS; must match
 * bnx2_stats_str_arr, bnx2_stats_offset_arr and the *_stats_len_arr
 * tables below, which are indexed in parallel.
 */
#define BNX2_NUM_STATS 46

/* Names for the ethtool -S statistics, in stats-block order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
6939
/* Word offset of a named counter within the statistics block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Stats-block word offset for each entry of bnx2_stats_str_arr.  For
 * 64-bit counters this points at the _hi word; the _lo word follows it.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6990
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Byte width (8, 4, or 0 = skip) of each counter on 5706/5708-A0. */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7001
5b0c76ad
MC
/* Byte width of each counter on 5708 (carrier-sense errata fixed). */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7009
b6016b76
MC
/* Number of self-tests reported through ETH_SS_TEST; indexes into
 * bnx2_tests_str_arr and the buf[] filled by bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Names for the ethtool self-tests, in bnx2_self_test() result order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7022
7023static int
b9f2c044 7024bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7025{
b9f2c044
JG
7026 switch (sset) {
7027 case ETH_SS_TEST:
7028 return BNX2_NUM_TESTS;
7029 case ETH_SS_STATS:
7030 return BNX2_NUM_STATS;
7031 default:
7032 return -EOPNOTSUPP;
7033 }
b6016b76
MC
7034}
7035
/* ethtool self_test: run the online tests (nvram, interrupt, link) and,
 * if requested, the offline tests (registers, memory, loopback) which
 * require resetting the chip.  buf[i] is nonzero for each failed test,
 * indexed as in bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* The chip must be in D0 to run any test. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Take the NIC out of service and put it in diag mode. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation (or shut down if the device
		 * was not up to begin with).
		 */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to low power if the interface is down. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7094
7095static void
7096bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7097{
7098 switch (stringset) {
7099 case ETH_SS_STATS:
7100 memcpy(buf, bnx2_stats_str_arr,
7101 sizeof(bnx2_stats_str_arr));
7102 break;
7103 case ETH_SS_TEST:
7104 memcpy(buf, bnx2_tests_str_arr,
7105 sizeof(bnx2_tests_str_arr));
7106 break;
7107 }
7108}
7109
b6016b76
MC
/* ethtool get_ethtool_stats: widen each stats-block counter to u64.
 * The per-chip length table says whether a counter is 8 bytes (hi/lo
 * pair), 4 bytes, or skipped (errata) for this silicon revision.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block yet (device never opened): report zeroes. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
7150
/* ethtool phys_id: identify the port by blinking its LED for `data`
 * seconds (default 2), alternating all-on and traffic-override states
 * every 500 ms, then restore the original LED configuration.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	/* LED registers are only accessible with the chip powered up. */
	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		/* Allow the user to interrupt a long blink. */
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	/* Return to low power if the interface is down. */
	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7190
4666f87a
MC
7191static int
7192bnx2_set_tx_csum(struct net_device *dev, u32 data)
7193{
7194 struct bnx2 *bp = netdev_priv(dev);
7195
7196 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 7197 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
7198 else
7199 return (ethtool_op_set_tx_csum(dev, data));
7200}
7201
/* ethtool operations table registered in bnx2_init_one(). */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.set_tx_csum		= bnx2_set_tx_csum,
	.set_sg			= ethtool_op_set_sg,
	.set_tso		= bnx2_set_tso,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_sset_count		= bnx2_get_sset_count,
};
7232
/* Called with rtnl_lock */
/* Handle MII ioctls (SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG).  Direct PHY
 * register access is refused when a remote PHY is in control or when
 * the device is down (PHY unpowered).
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers is a privileged operation. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7286
7287/* Called with rtnl_lock */
7288static int
7289bnx2_change_mac_addr(struct net_device *dev, void *p)
7290{
7291 struct sockaddr *addr = p;
972ec0d4 7292 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7293
73eef4cd
MC
7294 if (!is_valid_ether_addr(addr->sa_data))
7295 return -EINVAL;
7296
b6016b76
MC
7297 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7298 if (netif_running(dev))
5fcaed01 7299 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7300
7301 return 0;
7302}
7303
7304/* Called with rtnl_lock */
7305static int
7306bnx2_change_mtu(struct net_device *dev, int new_mtu)
7307{
972ec0d4 7308 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7309
7310 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7311 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7312 return -EINVAL;
7313
7314 dev->mtu = new_mtu;
5d5d0015 7315 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7316}
7317
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run each vector's interrupt handler with that vector's
 * interrupt line masked, so netconsole etc. make progress without IRQs.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		unsigned int irq = bp->irq_tbl[vec].vector;

		disable_irq(irq);
		bnx2_interrupt(irq, &bp->bnx2_napi[vec]);
		enable_irq(irq);
	}
}
#endif
7332
253c8b75
MC
7333static void __devinit
7334bnx2_get_5709_media(struct bnx2 *bp)
7335{
7336 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7337 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7338 u32 strap;
7339
7340 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7341 return;
7342 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
583c28e5 7343 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7344 return;
7345 }
7346
7347 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7348 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7349 else
7350 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7351
7352 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7353 switch (strap) {
7354 case 0x4:
7355 case 0x5:
7356 case 0x6:
583c28e5 7357 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7358 return;
7359 }
7360 } else {
7361 switch (strap) {
7362 case 0x1:
7363 case 0x2:
7364 case 0x4:
583c28e5 7365 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7366 return;
7367 }
7368 }
7369}
7370
883e5151
MC
7371static void __devinit
7372bnx2_get_pci_speed(struct bnx2 *bp)
7373{
7374 u32 reg;
7375
7376 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7377 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7378 u32 clkreg;
7379
f86e82fb 7380 bp->flags |= BNX2_FLAG_PCIX;
883e5151
MC
7381
7382 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7383
7384 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7385 switch (clkreg) {
7386 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7387 bp->bus_speed_mhz = 133;
7388 break;
7389
7390 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7391 bp->bus_speed_mhz = 100;
7392 break;
7393
7394 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7395 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7396 bp->bus_speed_mhz = 66;
7397 break;
7398
7399 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7400 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7401 bp->bus_speed_mhz = 50;
7402 break;
7403
7404 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7405 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7406 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7407 bp->bus_speed_mhz = 33;
7408 break;
7409 }
7410 }
7411 else {
7412 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7413 bp->bus_speed_mhz = 66;
7414 else
7415 bp->bus_speed_mhz = 33;
7416 }
7417
7418 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
f86e82fb 7419 bp->flags |= BNX2_FLAG_PCI_32BIT;
883e5151
MC
7420
7421}
7422
b6016b76
MC
7423static int __devinit
7424bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7425{
7426 struct bnx2 *bp;
7427 unsigned long mem_len;
58fc2ea4 7428 int rc, i, j;
b6016b76 7429 u32 reg;
40453c83 7430 u64 dma_mask, persist_dma_mask;
b6016b76 7431
b6016b76 7432 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7433 bp = netdev_priv(dev);
b6016b76
MC
7434
7435 bp->flags = 0;
7436 bp->phy_flags = 0;
7437
7438 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7439 rc = pci_enable_device(pdev);
7440 if (rc) {
898eb71c 7441 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
7442 goto err_out;
7443 }
7444
7445 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7446 dev_err(&pdev->dev,
2e8a538d 7447 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
7448 rc = -ENODEV;
7449 goto err_out_disable;
7450 }
7451
7452 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7453 if (rc) {
9b91cf9d 7454 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
7455 goto err_out_disable;
7456 }
7457
7458 pci_set_master(pdev);
6ff2da49 7459 pci_save_state(pdev);
b6016b76
MC
7460
7461 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7462 if (bp->pm_cap == 0) {
9b91cf9d 7463 dev_err(&pdev->dev,
2e8a538d 7464 "Cannot find power management capability, aborting.\n");
b6016b76
MC
7465 rc = -EIO;
7466 goto err_out_release;
7467 }
7468
b6016b76
MC
7469 bp->dev = dev;
7470 bp->pdev = pdev;
7471
7472 spin_lock_init(&bp->phy_lock);
1b8227c4 7473 spin_lock_init(&bp->indirect_lock);
c4028958 7474 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7475
7476 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
706bf240 7477 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
b6016b76
MC
7478 dev->mem_end = dev->mem_start + mem_len;
7479 dev->irq = pdev->irq;
7480
7481 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7482
7483 if (!bp->regview) {
9b91cf9d 7484 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
7485 rc = -ENOMEM;
7486 goto err_out_release;
7487 }
7488
7489 /* Configure byte swap and enable write to the reg_window registers.
7490 * Rely on CPU to do target byte swapping on big endian systems
7491 * The chip's target access swapping will not swap all accesses
7492 */
7493 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7494 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7495 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7496
829ca9a3 7497 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7498
7499 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7500
883e5151
MC
7501 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7502 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7503 dev_err(&pdev->dev,
7504 "Cannot find PCIE capability, aborting.\n");
7505 rc = -EIO;
7506 goto err_out_unmap;
7507 }
f86e82fb 7508 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7509 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7510 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
883e5151 7511 } else {
59b47d8a
MC
7512 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7513 if (bp->pcix_cap == 0) {
7514 dev_err(&pdev->dev,
7515 "Cannot find PCIX capability, aborting.\n");
7516 rc = -EIO;
7517 goto err_out_unmap;
7518 }
7519 }
7520
b4b36042
MC
7521 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7522 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7523 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7524 }
7525
8e6a72c4
MC
7526 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7527 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7528 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7529 }
7530
40453c83
MC
7531 /* 5708 cannot support DMA addresses > 40-bit. */
7532 if (CHIP_NUM(bp) == CHIP_NUM_5708)
50cf156a 7533 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 7534 else
6a35528a 7535 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
7536
7537 /* Configure DMA attributes. */
7538 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7539 dev->features |= NETIF_F_HIGHDMA;
7540 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7541 if (rc) {
7542 dev_err(&pdev->dev,
7543 "pci_set_consistent_dma_mask failed, aborting.\n");
7544 goto err_out_unmap;
7545 }
284901a9 7546 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
40453c83
MC
7547 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7548 goto err_out_unmap;
7549 }
7550
f86e82fb 7551 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 7552 bnx2_get_pci_speed(bp);
b6016b76
MC
7553
7554 /* 5706A0 may falsely detect SERR and PERR. */
7555 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7556 reg = REG_RD(bp, PCI_COMMAND);
7557 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7558 REG_WR(bp, PCI_COMMAND, reg);
7559 }
7560 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 7561 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 7562
9b91cf9d 7563 dev_err(&pdev->dev,
2e8a538d 7564 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
7565 goto err_out_unmap;
7566 }
7567
7568 bnx2_init_nvram(bp);
7569
2726d6e1 7570 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
7571
7572 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
7573 BNX2_SHM_HDR_SIGNATURE_SIG) {
7574 u32 off = PCI_FUNC(pdev->devfn) << 2;
7575
2726d6e1 7576 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 7577 } else
e3648b3d
MC
7578 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7579
b6016b76
MC
7580 /* Get the permanent MAC address. First we need to make sure the
7581 * firmware is actually running.
7582 */
2726d6e1 7583 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
7584
7585 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7586 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 7587 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
7588 rc = -ENODEV;
7589 goto err_out_unmap;
7590 }
7591
2726d6e1 7592 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
58fc2ea4
MC
7593 for (i = 0, j = 0; i < 3; i++) {
7594 u8 num, k, skip0;
7595
7596 num = (u8) (reg >> (24 - (i * 8)));
7597 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7598 if (num >= k || !skip0 || k == 1) {
7599 bp->fw_version[j++] = (num / k) + '0';
7600 skip0 = 0;
7601 }
7602 }
7603 if (i != 2)
7604 bp->fw_version[j++] = '.';
7605 }
2726d6e1 7606 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
7607 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7608 bp->wol = 1;
7609
7610 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 7611 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
7612
7613 for (i = 0; i < 30; i++) {
2726d6e1 7614 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
7615 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7616 break;
7617 msleep(10);
7618 }
7619 }
2726d6e1 7620 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
7621 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7622 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7623 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 7624 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4
MC
7625
7626 bp->fw_version[j++] = ' ';
7627 for (i = 0; i < 3; i++) {
2726d6e1 7628 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
58fc2ea4
MC
7629 reg = swab32(reg);
7630 memcpy(&bp->fw_version[j], &reg, 4);
7631 j += 4;
7632 }
7633 }
b6016b76 7634
2726d6e1 7635 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
7636 bp->mac_addr[0] = (u8) (reg >> 8);
7637 bp->mac_addr[1] = (u8) reg;
7638
2726d6e1 7639 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
7640 bp->mac_addr[2] = (u8) (reg >> 24);
7641 bp->mac_addr[3] = (u8) (reg >> 16);
7642 bp->mac_addr[4] = (u8) (reg >> 8);
7643 bp->mac_addr[5] = (u8) reg;
7644
7645 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 7646 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
7647
7648 bp->rx_csum = 1;
7649
b6016b76
MC
7650 bp->tx_quick_cons_trip_int = 20;
7651 bp->tx_quick_cons_trip = 20;
7652 bp->tx_ticks_int = 80;
7653 bp->tx_ticks = 80;
6aa20a22 7654
b6016b76
MC
7655 bp->rx_quick_cons_trip_int = 6;
7656 bp->rx_quick_cons_trip = 6;
7657 bp->rx_ticks_int = 18;
7658 bp->rx_ticks = 18;
7659
7ea6920e 7660 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 7661
ac392abc 7662 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 7663
5b0c76ad
MC
7664 bp->phy_addr = 1;
7665
b6016b76 7666 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
7667 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7668 bnx2_get_5709_media(bp);
7669 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 7670 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 7671
0d8a6571 7672 bp->phy_port = PORT_TP;
583c28e5 7673 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 7674 bp->phy_port = PORT_FIBRE;
2726d6e1 7675 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 7676 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 7677 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7678 bp->wol = 0;
7679 }
38ea3686
MC
7680 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7681 /* Don't do parallel detect on this board because of
7682 * some board problems. The link will not go down
7683 * if we do parallel detect.
7684 */
7685 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7686 pdev->subsystem_device == 0x310c)
7687 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7688 } else {
5b0c76ad 7689 bp->phy_addr = 2;
5b0c76ad 7690 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 7691 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 7692 }
261dd5ca
MC
7693 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7694 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 7695 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
7696 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7697 (CHIP_REV(bp) == CHIP_REV_Ax ||
7698 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 7699 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 7700
7c62e83b
MC
7701 bnx2_init_fw_cap(bp);
7702
16088272
MC
7703 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7704 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
7705 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7706 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 7707 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7708 bp->wol = 0;
7709 }
dda1e390 7710
b6016b76
MC
7711 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7712 bp->tx_quick_cons_trip_int =
7713 bp->tx_quick_cons_trip;
7714 bp->tx_ticks_int = bp->tx_ticks;
7715 bp->rx_quick_cons_trip_int =
7716 bp->rx_quick_cons_trip;
7717 bp->rx_ticks_int = bp->rx_ticks;
7718 bp->comp_prod_trip_int = bp->comp_prod_trip;
7719 bp->com_ticks_int = bp->com_ticks;
7720 bp->cmd_ticks_int = bp->cmd_ticks;
7721 }
7722
f9317a40
MC
7723 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7724 *
7725 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7726 * with byte enables disabled on the unused 32-bit word. This is legal
7727 * but causes problems on the AMD 8132 which will eventually stop
7728 * responding after a while.
7729 *
7730 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7731 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7732 */
7733 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7734 struct pci_dev *amd_8132 = NULL;
7735
7736 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7737 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7738 amd_8132))) {
f9317a40 7739
44c10138
AK
7740 if (amd_8132->revision >= 0x10 &&
7741 amd_8132->revision <= 0x13) {
f9317a40
MC
7742 disable_msi = 1;
7743 pci_dev_put(amd_8132);
7744 break;
7745 }
7746 }
7747 }
7748
deaf391b 7749 bnx2_set_default_link(bp);
b6016b76
MC
7750 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7751
cd339a0e 7752 init_timer(&bp->timer);
ac392abc 7753 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
7754 bp->timer.data = (unsigned long) bp;
7755 bp->timer.function = bnx2_timer;
7756
b6016b76
MC
7757 return 0;
7758
7759err_out_unmap:
7760 if (bp->regview) {
7761 iounmap(bp->regview);
73eef4cd 7762 bp->regview = NULL;
b6016b76
MC
7763 }
7764
7765err_out_release:
7766 pci_release_regions(pdev);
7767
7768err_out_disable:
7769 pci_disable_device(pdev);
7770 pci_set_drvdata(pdev, NULL);
7771
7772err_out:
7773 return rc;
7774}
7775
883e5151
MC
7776static char * __devinit
7777bnx2_bus_string(struct bnx2 *bp, char *str)
7778{
7779 char *s = str;
7780
f86e82fb 7781 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
7782 s += sprintf(s, "PCI Express");
7783 } else {
7784 s += sprintf(s, "PCI");
f86e82fb 7785 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 7786 s += sprintf(s, "-X");
f86e82fb 7787 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
7788 s += sprintf(s, " 32-bit");
7789 else
7790 s += sprintf(s, " 64-bit");
7791 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7792 }
7793 return str;
7794}
7795
2ba582b7 7796static void __devinit
35efa7c1
MC
7797bnx2_init_napi(struct bnx2 *bp)
7798{
b4b36042 7799 int i;
35efa7c1 7800
b4b36042 7801 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
35e9010b
MC
7802 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7803 int (*poll)(struct napi_struct *, int);
7804
7805 if (i == 0)
7806 poll = bnx2_poll;
7807 else
f0ea2e63 7808 poll = bnx2_poll_msix;
35e9010b
MC
7809
7810 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
7811 bnapi->bp = bp;
7812 }
35efa7c1
MC
7813}
7814
/* Network device entry points.  VLAN registration and the netpoll
 * controller are compiled in only when the corresponding support exists.
 */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
7833
/* PCI probe entry point: allocate the netdev, run board init, load the
 * chip firmware images, set offload feature flags, and register with the
 * network stack.  Returns 0 or a negative errno; on error every resource
 * acquired after bnx2_init_board() succeeds is torn down under "error:".
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board released its own resources on failure. */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* MAC address was read from shared memory in bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* release_firmware(NULL) is not assumed safe here; guard both. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
7916
/* PCI remove entry point: drain deferred work, unregister the netdev,
 * then release firmware, mappings, and PCI resources in reverse order of
 * acquisition.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the reset_task work item is not still pending. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7940
/* PM suspend hook: save config space, quiesce the interface if it is up,
 * reset the chip, and enter the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Deferred reset work must not run against a suspended chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7964
/* PM resume hook: restore config space, and if the interface was up,
 * power the chip back to D0 and fully re-initialize it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7981
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		/* Quiesce NAPI/TX and put the chip into reset before the
		 * slot is reset underneath us.
		 */
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
8011
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	/* Restore the config space saved during probe/suspend. */
	pci_restore_state(pdev);

	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
8041
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
8061
/* AER (PCI error recovery) callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8067
/* PCI driver registration record: probe/remove, legacy PM hooks, and the
 * AER error handlers above.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8077
/* Module load: register the PCI driver; probing happens per matching device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8082
/* Module unload: unregister the driver, triggering remove for each device. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);
8090
8091
8092