]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2.c
ixgbe: Include offloaded FCoE data into total rx/tx statistics for 82599
[net-next-2.6.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
a6952b52 3 * Copyright (c) 2004-2009 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
f2a4f052 38#include <linux/if_vlan.h>
08013fa3 39#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
f2a4f052
MC
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
57579f76 49#include <linux/firmware.h>
706bf240 50#include <linux/log2.h>
ccffad25 51#include <linux/list.h>
f2a4f052 52
b6016b76
MC
53#include "bnx2.h"
54#include "bnx2_fw.h"
b3448b0b 55
b6016b76
MC
56#define DRV_MODULE_NAME "bnx2"
57#define PFX DRV_MODULE_NAME ": "
581daf7e
MC
58#define DRV_MODULE_VERSION "2.0.1"
59#define DRV_MODULE_RELDATE "May 6, 2009"
57579f76
MC
60#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-4.6.16.fw"
61#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-4.6.16.fw"
62#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-4.6.17.fw"
63#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-4.6.15.fw"
b6016b76
MC
64
65#define RUN_AT(x) (jiffies + (x))
66
67/* Time in jiffies before concluding the transmitter is hung. */
68#define TX_TIMEOUT (5*HZ)
69
fefa8645 70static char version[] __devinitdata =
b6016b76
MC
71 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
72
73MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 74MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
75MODULE_LICENSE("GPL");
76MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
77MODULE_FIRMWARE(FW_MIPS_FILE_06);
78MODULE_FIRMWARE(FW_RV2P_FILE_06);
79MODULE_FIRMWARE(FW_MIPS_FILE_09);
80MODULE_FIRMWARE(FW_RV2P_FILE_09);
b6016b76
MC
81
82static int disable_msi = 0;
83
84module_param(disable_msi, int, 0);
85MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
86
87typedef enum {
88 BCM5706 = 0,
89 NC370T,
90 NC370I,
91 BCM5706S,
92 NC370F,
5b0c76ad
MC
93 BCM5708,
94 BCM5708S,
bac0dff6 95 BCM5709,
27a005b8 96 BCM5709S,
7bb0a04f 97 BCM5716,
1caacecb 98 BCM5716S,
b6016b76
MC
99} board_t;
100
101/* indexed by board_t, above */
fefa8645 102static struct {
b6016b76
MC
103 char *name;
104} board_info[] __devinitdata = {
105 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
106 { "HP NC370T Multifunction Gigabit Server Adapter" },
107 { "HP NC370i Multifunction Gigabit Server Adapter" },
108 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
109 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
110 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
111 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 112 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 113 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
7bb0a04f 114 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
1caacecb 115 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
b6016b76
MC
116 };
117
7bb0a04f 118static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
120 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
122 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
128 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
131 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
133 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
135 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
136 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
137 { PCI_VENDOR_ID_BROADCOM, 0x163b,
138 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 139 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 140 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
141 { 0, }
142};
143
144static struct flash_spec flash_table[] =
145{
e30372c9
MC
146#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
147#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 148 /* Slow EEPROM */
37137709 149 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 150 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
151 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
152 "EEPROM - slow"},
37137709
MC
153 /* Expansion entry 0001 */
154 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 155 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
156 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
157 "Entry 0001"},
b6016b76
MC
158 /* Saifun SA25F010 (non-buffered flash) */
159 /* strap, cfg1, & write1 need updates */
37137709 160 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 161 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
162 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
163 "Non-buffered flash (128kB)"},
164 /* Saifun SA25F020 (non-buffered flash) */
165 /* strap, cfg1, & write1 need updates */
37137709 166 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 167 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
168 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
169 "Non-buffered flash (256kB)"},
37137709
MC
170 /* Expansion entry 0100 */
171 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
173 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
174 "Entry 0100"},
175 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 176 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 177 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
178 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
179 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
180 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
181 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
183 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
184 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
185 /* Saifun SA25F005 (non-buffered flash) */
186 /* strap, cfg1, & write1 need updates */
187 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 188 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
189 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
190 "Non-buffered flash (64kB)"},
191 /* Fast EEPROM */
192 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 193 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
194 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
195 "EEPROM - fast"},
196 /* Expansion entry 1001 */
197 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 198 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
199 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1001"},
201 /* Expansion entry 1010 */
202 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1010"},
206 /* ATMEL AT45DB011B (buffered flash) */
207 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 208 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
209 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
210 "Buffered flash (128kB)"},
211 /* Expansion entry 1100 */
212 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 213 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
214 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
215 "Entry 1100"},
216 /* Expansion entry 1101 */
217 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 "Entry 1101"},
221 /* Ateml Expansion entry 1110 */
222 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 223 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
224 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
225 "Entry 1110 (Atmel)"},
226 /* ATMEL AT45DB021B (buffered flash) */
227 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
229 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
230 "Buffered flash (256kB)"},
b6016b76
MC
231};
232
e30372c9
MC
233static struct flash_spec flash_5709 = {
234 .flags = BNX2_NV_BUFFERED,
235 .page_bits = BCM5709_FLASH_PAGE_BITS,
236 .page_size = BCM5709_FLASH_PAGE_SIZE,
237 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
238 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
239 .name = "5709 Buffered flash (256kB)",
240};
241
b6016b76
MC
242MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
243
35e9010b 244static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 245{
2f8af120 246 u32 diff;
e89bbf10 247
2f8af120 248 smp_mb();
faac9c4b
MC
249
250 /* The ring uses 256 indices for 255 entries, one of them
251 * needs to be skipped.
252 */
35e9010b 253 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
254 if (unlikely(diff >= TX_DESC_CNT)) {
255 diff &= 0xffff;
256 if (diff == TX_DESC_CNT)
257 diff = MAX_TX_DESC_CNT;
258 }
e89bbf10
MC
259 return (bp->tx_ring_size - diff);
260}
261
b6016b76
MC
262static u32
263bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
264{
1b8227c4
MC
265 u32 val;
266
267 spin_lock_bh(&bp->indirect_lock);
b6016b76 268 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
269 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
270 spin_unlock_bh(&bp->indirect_lock);
271 return val;
b6016b76
MC
272}
273
274static void
275bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
276{
1b8227c4 277 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
278 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
279 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 280 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
281}
282
2726d6e1
MC
283static void
284bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
285{
286 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
287}
288
289static u32
290bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
291{
292 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
293}
294
b6016b76
MC
/* Write @val into context memory for connection @cid_addr at @offset.
 *
 * On 5709 the write goes through the CTX_CTX_DATA/CTX_CTX_CTRL pair and
 * we poll up to 5 times (5us apart) for WRITE_REQ to clear; a timeout is
 * silently ignored.  Older chips use the simpler DATA_ADR/DATA window.
 * indirect_lock serializes against other indirect register users.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		for (i = 0; i < 5; i++) {
			/* val is reused here as a scratch readback. */
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
318
/* Read PHY register @reg via the EMAC MDIO interface into *@val.
 *
 * If hardware auto-polling of the PHY is enabled, it is turned off for
 * the duration of the transaction and restored afterwards.  Returns 0 on
 * success, or -EBUSY (with *@val set to 0) if the MDIO transaction does
 * not complete within the ~500us polling window.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Temporarily disable auto-polling so it cannot collide
		 * with our manual MDIO transaction.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the read: PHY address, register, command, start/busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read to fetch the data field once the
			 * transaction has completed.
			 */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
375
/* Write @val to PHY register @reg via the EMAC MDIO interface.
 *
 * Mirrors bnx2_read_phy(): auto-polling is suspended around the manual
 * transaction and restored afterwards.  Returns 0 on success or -EBUSY
 * if the transaction does not complete within the ~500us polling window.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Temporarily disable auto-polling so it cannot collide
		 * with our manual MDIO transaction.
		 */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Launch the write: PHY address, register, data, command, busy. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		/* Restore auto-polling. */
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
424
425static void
426bnx2_disable_int(struct bnx2 *bp)
427{
b4b36042
MC
428 int i;
429 struct bnx2_napi *bnapi;
430
431 for (i = 0; i < bp->irq_nvecs; i++) {
432 bnapi = &bp->bnx2_napi[i];
433 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
434 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
435 }
b6016b76
MC
436 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
437}
438
439static void
440bnx2_enable_int(struct bnx2 *bp)
441{
b4b36042
MC
442 int i;
443 struct bnx2_napi *bnapi;
35efa7c1 444
b4b36042
MC
445 for (i = 0; i < bp->irq_nvecs; i++) {
446 bnapi = &bp->bnx2_napi[i];
1269a8a6 447
b4b36042
MC
448 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
449 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
450 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
451 bnapi->last_status_idx);
b6016b76 452
b4b36042
MC
453 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
454 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
455 bnapi->last_status_idx);
456 }
bf5295bb 457 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
458}
459
460static void
461bnx2_disable_int_sync(struct bnx2 *bp)
462{
b4b36042
MC
463 int i;
464
b6016b76
MC
465 atomic_inc(&bp->intr_sem);
466 bnx2_disable_int(bp);
b4b36042
MC
467 for (i = 0; i < bp->irq_nvecs; i++)
468 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
469}
470
35efa7c1
MC
471static void
472bnx2_napi_disable(struct bnx2 *bp)
473{
b4b36042
MC
474 int i;
475
476 for (i = 0; i < bp->irq_nvecs; i++)
477 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
478}
479
480static void
481bnx2_napi_enable(struct bnx2 *bp)
482{
b4b36042
MC
483 int i;
484
485 for (i = 0; i < bp->irq_nvecs; i++)
486 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
487}
488
b6016b76
MC
489static void
490bnx2_netif_stop(struct bnx2 *bp)
491{
492 bnx2_disable_int_sync(bp);
493 if (netif_running(bp->dev)) {
35efa7c1 494 bnx2_napi_disable(bp);
b6016b76
MC
495 netif_tx_disable(bp->dev);
496 bp->dev->trans_start = jiffies; /* prevent tx timeout */
497 }
498}
499
500static void
501bnx2_netif_start(struct bnx2 *bp)
502{
503 if (atomic_dec_and_test(&bp->intr_sem)) {
504 if (netif_running(bp->dev)) {
706bf240 505 netif_tx_wake_all_queues(bp->dev);
35efa7c1 506 bnx2_napi_enable(bp);
b6016b76
MC
507 bnx2_enable_int(bp);
508 }
509 }
510}
511
35e9010b
MC
512static void
513bnx2_free_tx_mem(struct bnx2 *bp)
514{
515 int i;
516
517 for (i = 0; i < bp->num_tx_rings; i++) {
518 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
519 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
520
521 if (txr->tx_desc_ring) {
522 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
523 txr->tx_desc_ring,
524 txr->tx_desc_mapping);
525 txr->tx_desc_ring = NULL;
526 }
527 kfree(txr->tx_buf_ring);
528 txr->tx_buf_ring = NULL;
529 }
530}
531
bb4f98ab
MC
532static void
533bnx2_free_rx_mem(struct bnx2 *bp)
534{
535 int i;
536
537 for (i = 0; i < bp->num_rx_rings; i++) {
538 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
539 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
540 int j;
541
542 for (j = 0; j < bp->rx_max_ring; j++) {
543 if (rxr->rx_desc_ring[j])
544 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
545 rxr->rx_desc_ring[j],
546 rxr->rx_desc_mapping[j]);
547 rxr->rx_desc_ring[j] = NULL;
548 }
549 if (rxr->rx_buf_ring)
550 vfree(rxr->rx_buf_ring);
551 rxr->rx_buf_ring = NULL;
552
553 for (j = 0; j < bp->rx_max_pg_ring; j++) {
554 if (rxr->rx_pg_desc_ring[j])
555 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
3298a738
MC
556 rxr->rx_pg_desc_ring[j],
557 rxr->rx_pg_desc_mapping[j]);
558 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab
MC
559 }
560 if (rxr->rx_pg_ring)
561 vfree(rxr->rx_pg_ring);
562 rxr->rx_pg_ring = NULL;
563 }
564}
565
35e9010b
MC
566static int
567bnx2_alloc_tx_mem(struct bnx2 *bp)
568{
569 int i;
570
571 for (i = 0; i < bp->num_tx_rings; i++) {
572 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
573 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
574
575 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
576 if (txr->tx_buf_ring == NULL)
577 return -ENOMEM;
578
579 txr->tx_desc_ring =
580 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
581 &txr->tx_desc_mapping);
582 if (txr->tx_desc_ring == NULL)
583 return -ENOMEM;
584 }
585 return 0;
586}
587
bb4f98ab
MC
/* Allocate, for each RX queue, the software buffer ring, the DMA
 * descriptor rings, and (when rx_pg_ring_size is set) the page rings.
 * Returns 0 or -ENOMEM; partially allocated state is left in place for
 * the caller to unwind (bnx2_alloc_mem() calls bnx2_free_mem() on error).
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* rx_max_pg_ring is presumably 0 when page rings are not
		 * in use, making this loop a no-op — confirm in bnx2.h.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
636
b6016b76
MC
637static void
638bnx2_free_mem(struct bnx2 *bp)
639{
13daffa2 640 int i;
43e80b89 641 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 642
35e9010b 643 bnx2_free_tx_mem(bp);
bb4f98ab 644 bnx2_free_rx_mem(bp);
35e9010b 645
59b47d8a
MC
646 for (i = 0; i < bp->ctx_pages; i++) {
647 if (bp->ctx_blk[i]) {
648 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
649 bp->ctx_blk[i],
650 bp->ctx_blk_mapping[i]);
651 bp->ctx_blk[i] = NULL;
652 }
653 }
43e80b89 654 if (bnapi->status_blk.msi) {
0f31f994 655 pci_free_consistent(bp->pdev, bp->status_stats_size,
43e80b89
MC
656 bnapi->status_blk.msi,
657 bp->status_blk_mapping);
658 bnapi->status_blk.msi = NULL;
0f31f994 659 bp->stats_blk = NULL;
b6016b76 660 }
b6016b76
MC
661}
662
/* Allocate all device memory: the combined status/statistics DMA block
 * (with per-vector MSI-X status sub-blocks when supported), the 5709
 * context pages, and the RX/TX rings.  Returns 0 or -ENOMEM; on any
 * failure everything allocated so far is freed via bnx2_free_mem().
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 uses the base (MSI/INTx) status block layout. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		/* Vectors 1..N each get an aligned MSI-X sub-block
		 * within the same allocation.
		 */
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			bnapi->int_num = i << 24;
		}
	}

	/* The statistics block follows the status block(s). */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 needs 8KB of host context memory, in page-sized
		 * DMA chunks.
		 */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
739
e3648b3d
MC
/* Publish the current link state to the firmware via shared memory.
 *
 * Skipped entirely when the PHY is remotely managed.  The value written
 * to BNX2_LINK_STATUS encodes speed/duplex, link up/down, and autoneg
 * progress (parallel detect vs. autoneg complete).
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR is read twice on purpose — presumably
			 * because the register latches link-down events
			 * (standard MII behavior), so the second read
			 * returns the current status.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
798
9b1084b8
MC
799static char *
800bnx2_xceiver_str(struct bnx2 *bp)
801{
802 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 803 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
804 "Copper"));
805}
806
b6016b76
MC
/* Update the carrier state and log the link status, then forward the
 * result to firmware via bnx2_report_fw_link().
 *
 * The up-path builds one log line from several continuation printk()s
 * (speed, duplex, flow control), so their order matters.
 */
static void
bnx2_report_link(struct bnx2 *bp)
{
	if (bp->link_up) {
		netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
		       bnx2_xceiver_str(bp));

		printk("%d Mbps ", bp->line_speed);

		if (bp->duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->flow_ctrl) {
			if (bp->flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			}
			else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");
	}
	else {
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
		       bnx2_xceiver_str(bp));
	}

	bnx2_report_fw_link(bp);
}
843
/* Resolve the negotiated flow control setting into bp->flow_ctrl.
 *
 * If pause autoneg is not fully enabled, the requested setting is used
 * (full duplex only).  Half duplex never gets flow control.  The 5708
 * SerDes reports the resolved result directly in BCM5708S_1000X_STAT1;
 * otherwise the local/remote advertisements are compared per IEEE
 * 802.3ab Table 28B-3, after translating 1000X pause bits to their
 * copper equivalents for SerDes PHYs.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		/* Map 1000X pause advertisement bits onto the copper
		 * PAUSE_CAP/PAUSE_ASYM bits so one resolution table
		 * below handles both media types.
		 */
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
919
27a005b8
MC
/* Derive line speed and duplex for the 5709 SerDes PHY and mark the
 * link up.  When speed autoneg is off, the requested speed/duplex are
 * used directly; otherwise they are decoded from the GP_STATUS block.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* Select the GP_STATUS register block, read the status word,
	 * then restore the default COMBO_IEEEB0 block.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
	case MII_BNX2_GP_TOP_AN_SPEED_10:
		bp->line_speed = SPEED_10;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_100:
		bp->line_speed = SPEED_100;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_1G:
	case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
		bp->line_speed = SPEED_1000;
		break;
	case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
		bp->line_speed = SPEED_2500;
		break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
958
b6016b76 959static int
5b0c76ad
MC
960bnx2_5708s_linkup(struct bnx2 *bp)
961{
962 u32 val;
963
964 bp->link_up = 1;
965 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
966 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
967 case BCM5708S_1000X_STAT1_SPEED_10:
968 bp->line_speed = SPEED_10;
969 break;
970 case BCM5708S_1000X_STAT1_SPEED_100:
971 bp->line_speed = SPEED_100;
972 break;
973 case BCM5708S_1000X_STAT1_SPEED_1G:
974 bp->line_speed = SPEED_1000;
975 break;
976 case BCM5708S_1000X_STAT1_SPEED_2G5:
977 bp->line_speed = SPEED_2500;
978 break;
979 }
980 if (val & BCM5708S_1000X_STAT1_FD)
981 bp->duplex = DUPLEX_FULL;
982 else
983 bp->duplex = DUPLEX_HALF;
984
985 return 0;
986}
987
988static int
989bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
990{
991 u32 bmcr, local_adv, remote_adv, common;
992
993 bp->link_up = 1;
994 bp->line_speed = SPEED_1000;
995
ca58c3af 996 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
997 if (bmcr & BMCR_FULLDPLX) {
998 bp->duplex = DUPLEX_FULL;
999 }
1000 else {
1001 bp->duplex = DUPLEX_HALF;
1002 }
1003
1004 if (!(bmcr & BMCR_ANENABLE)) {
1005 return 0;
1006 }
1007
ca58c3af
MC
1008 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1009 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
1010
1011 common = local_adv & remote_adv;
1012 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1013
1014 if (common & ADVERTISE_1000XFULL) {
1015 bp->duplex = DUPLEX_FULL;
1016 }
1017 else {
1018 bp->duplex = DUPLEX_HALF;
1019 }
1020 }
1021
1022 return 0;
1023}
1024
/* Determine line speed and duplex for a copper PHY.
 *
 * With autoneg enabled, 1000BASE-T resolution is tried first (local
 * MII_CTRL1000 advertisement vs. remote MII_STAT1000, whose ability
 * bits sit 2 positions higher, hence the >> 2), then the common 10/100
 * abilities; if nothing matches, the link is marked down.  With autoneg
 * disabled, speed and duplex are taken straight from BMCR.  Always
 * returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Align the remote 1000BASE-T ability bits with the
		 * local advertisement bits before masking.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability: report link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1090
83e3fc89 1091static void
bb4f98ab 1092bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
83e3fc89 1093{
bb4f98ab 1094 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
83e3fc89
MC
1095
1096 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1097 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1098 val |= 0x02 << 8;
1099
1100 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1101 u32 lo_water, hi_water;
1102
1103 if (bp->flow_ctrl & FLOW_CTRL_TX)
1104 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1105 else
1106 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1107 if (lo_water >= bp->rx_ring_size)
1108 lo_water = 0;
1109
1110 hi_water = bp->rx_ring_size / 4;
1111
1112 if (hi_water <= lo_water)
1113 lo_water = 0;
1114
1115 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1116 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1117
1118 if (hi_water > 0xf)
1119 hi_water = 0xf;
1120 else if (hi_water == 0)
1121 lo_water = 0;
1122 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1123 }
1124 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1125}
1126
bb4f98ab
MC
1127static void
1128bnx2_init_all_rx_contexts(struct bnx2 *bp)
1129{
1130 int i;
1131 u32 cid;
1132
1133 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1134 if (i == 1)
1135 cid = RX_RSS_CID;
1136 bnx2_init_rx_context(bp, cid);
1137 }
1138}
1139
/* Program the EMAC to match the currently resolved link parameters
 * (bp->link_up, bp->line_speed, bp->duplex, bp->flow_ctrl): port mode,
 * duplex, rx/tx PAUSE, and ack the link-change interrupt.  On 5709 the
 * rx contexts are re-initialized because their flow-control watermarks
 * depend on bp->flow_ctrl (see bnx2_init_rx_context).
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
    u32 val;

    REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
    /* 1G half-duplex needs different slot-time/IPG values.
     * NOTE(review): 0x2620/0x26ff are magic timing constants — presumably
     * from the hardware manual; confirm before changing. */
    if (bp->link_up && (bp->line_speed == SPEED_1000) &&
        (bp->duplex == DUPLEX_HALF)) {
        REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
    }

    /* Configure the EMAC mode register. */
    val = REG_RD(bp, BNX2_EMAC_MODE);

    val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
        BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
        BNX2_EMAC_MODE_25G_MODE);

    if (bp->link_up) {
        switch (bp->line_speed) {
        case SPEED_10:
            /* 5706 has no dedicated 10M mode; it uses plain MII. */
            if (CHIP_NUM(bp) != CHIP_NUM_5706) {
                val |= BNX2_EMAC_MODE_PORT_MII_10M;
                break;
            }
            /* fall through */
        case SPEED_100:
            val |= BNX2_EMAC_MODE_PORT_MII;
            break;
        case SPEED_2500:
            val |= BNX2_EMAC_MODE_25G_MODE;
            /* fall through — 2.5G also requires GMII port mode */
        case SPEED_1000:
            val |= BNX2_EMAC_MODE_PORT_GMII;
            break;
        }
    }
    else {
        /* No link: park the port in GMII mode. */
        val |= BNX2_EMAC_MODE_PORT_GMII;
    }

    /* Set the MAC to operate in the appropriate duplex mode. */
    if (bp->duplex == DUPLEX_HALF)
        val |= BNX2_EMAC_MODE_HALF_DUPLEX;
    REG_WR(bp, BNX2_EMAC_MODE, val);

    /* Enable/disable rx PAUSE. */
    bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

    if (bp->flow_ctrl & FLOW_CTRL_RX)
        bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
    REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

    /* Enable/disable tx PAUSE. */
    val = REG_RD(bp, BNX2_EMAC_TX_MODE);
    val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

    if (bp->flow_ctrl & FLOW_CTRL_TX)
        val |= BNX2_EMAC_TX_MODE_FLOW_EN;
    REG_WR(bp, BNX2_EMAC_TX_MODE, val);

    /* Acknowledge the interrupt. */
    REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

    /* 5709 watermarks depend on flow control; reprogram the contexts. */
    if (CHIP_NUM(bp) == CHIP_NUM_5709)
        bnx2_init_all_rx_contexts(bp);
}
1207
27a005b8
MC
1208static void
1209bnx2_enable_bmsr1(struct bnx2 *bp)
1210{
583c28e5 1211 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1212 (CHIP_NUM(bp) == CHIP_NUM_5709))
1213 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1214 MII_BNX2_BLK_ADDR_GP_STATUS);
1215}
1216
1217static void
1218bnx2_disable_bmsr1(struct bnx2 *bp)
1219{
583c28e5 1220 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1221 (CHIP_NUM(bp) == CHIP_NUM_5709))
1222 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1223 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1224}
1225
605a9e20
MC
1226static int
1227bnx2_test_and_enable_2g5(struct bnx2 *bp)
1228{
1229 u32 up1;
1230 int ret = 1;
1231
583c28e5 1232 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1233 return 0;
1234
1235 if (bp->autoneg & AUTONEG_SPEED)
1236 bp->advertising |= ADVERTISED_2500baseX_Full;
1237
27a005b8
MC
1238 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1239 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1240
605a9e20
MC
1241 bnx2_read_phy(bp, bp->mii_up1, &up1);
1242 if (!(up1 & BCM5708S_UP1_2G5)) {
1243 up1 |= BCM5708S_UP1_2G5;
1244 bnx2_write_phy(bp, bp->mii_up1, up1);
1245 ret = 0;
1246 }
1247
27a005b8
MC
1248 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1249 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1250 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1251
605a9e20
MC
1252 return ret;
1253}
1254
1255static int
1256bnx2_test_and_disable_2g5(struct bnx2 *bp)
1257{
1258 u32 up1;
1259 int ret = 0;
1260
583c28e5 1261 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1262 return 0;
1263
27a005b8
MC
1264 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1265 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1266
605a9e20
MC
1267 bnx2_read_phy(bp, bp->mii_up1, &up1);
1268 if (up1 & BCM5708S_UP1_2G5) {
1269 up1 &= ~BCM5708S_UP1_2G5;
1270 bnx2_write_phy(bp, bp->mii_up1, up1);
1271 ret = 1;
1272 }
1273
27a005b8
MC
1274 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1275 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1276 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1277
605a9e20
MC
1278 return ret;
1279}
1280
1281static void
1282bnx2_enable_forced_2g5(struct bnx2 *bp)
1283{
1284 u32 bmcr;
1285
583c28e5 1286 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1287 return;
1288
27a005b8
MC
1289 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1290 u32 val;
1291
1292 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1293 MII_BNX2_BLK_ADDR_SERDES_DIG);
1294 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1295 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1296 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1297 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1298
1299 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1300 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1301 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1302
1303 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1304 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1305 bmcr |= BCM5708S_BMCR_FORCE_2500;
1306 }
1307
1308 if (bp->autoneg & AUTONEG_SPEED) {
1309 bmcr &= ~BMCR_ANENABLE;
1310 if (bp->req_duplex == DUPLEX_FULL)
1311 bmcr |= BMCR_FULLDPLX;
1312 }
1313 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1314}
1315
1316static void
1317bnx2_disable_forced_2g5(struct bnx2 *bp)
1318{
1319 u32 bmcr;
1320
583c28e5 1321 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1322 return;
1323
27a005b8
MC
1324 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1325 u32 val;
1326
1327 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1328 MII_BNX2_BLK_ADDR_SERDES_DIG);
1329 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1330 val &= ~MII_BNX2_SD_MISC1_FORCE;
1331 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1332
1333 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1334 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1335 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1336
1337 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1338 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1339 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1340 }
1341
1342 if (bp->autoneg & AUTONEG_SPEED)
1343 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1344 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1345}
1346
b2fadeae
MC
1347static void
1348bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1349{
1350 u32 val;
1351
1352 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1353 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1354 if (start)
1355 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1356 else
1357 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1358}
1359
/* Poll the PHY, update bp->link_up/line_speed/duplex/flow_ctrl, report
 * link changes, and reprogram the MAC.  Called with bp->phy_lock held.
 * Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
    u32 bmsr;
    u8 link_up;

    /* In loopback the link is up by definition. */
    if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
        bp->link_up = 1;
        return 0;
    }

    /* Remote-PHY setups are handled via firmware events instead
     * (see bnx2_remote_phy_event). */
    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return 0;

    link_up = bp->link_up;

    /* Read the status register twice; the latched link bit needs a
     * second read to reflect the current state. */
    bnx2_enable_bmsr1(bp);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
    bnx2_disable_bmsr1(bp);

    if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
        (CHIP_NUM(bp) == CHIP_NUM_5706)) {
        u32 val, an_dbg;

        /* Release a previously forced link-down first. */
        if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
            bnx2_5706s_force_link_dn(bp, 0);
            bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
        }
        val = REG_RD(bp, BNX2_EMAC_STATUS);

        /* Double-read the AN debug shadow to clear latched NOSYNC. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        /* On 5706 SerDes, trust the EMAC link + sync status over the
         * PHY's BMSR link bit. */
        if ((val & BNX2_EMAC_STATUS_LINK) &&
            !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
            bmsr |= BMSR_LSTATUS;
        else
            bmsr &= ~BMSR_LSTATUS;
    }

    if (bmsr & BMSR_LSTATUS) {
        bp->link_up = 1;

        /* Resolve speed/duplex with the PHY-specific helper. */
        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
            if (CHIP_NUM(bp) == CHIP_NUM_5706)
                bnx2_5706s_linkup(bp);
            else if (CHIP_NUM(bp) == CHIP_NUM_5708)
                bnx2_5708s_linkup(bp);
            else if (CHIP_NUM(bp) == CHIP_NUM_5709)
                bnx2_5709s_linkup(bp);
        }
        else {
            bnx2_copper_linkup(bp);
        }
        bnx2_resolve_flow_ctrl(bp);
    }
    else {
        /* Link down: drop any forced 2.5G and re-enable autoneg if we
         * were in parallel-detect mode. */
        if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
            (bp->autoneg & AUTONEG_SPEED))
            bnx2_disable_forced_2g5(bp);

        if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
            u32 bmcr;

            bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
            bmcr |= BMCR_ANENABLE;
            bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

            bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
        }
        bp->link_up = 0;
    }

    if (bp->link_up != link_up) {
        bnx2_report_link(bp);
    }

    bnx2_set_mac_link(bp);

    return 0;
}
1443
1444static int
1445bnx2_reset_phy(struct bnx2 *bp)
1446{
1447 int i;
1448 u32 reg;
1449
ca58c3af 1450 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1451
1452#define PHY_RESET_MAX_WAIT 100
1453 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1454 udelay(10);
1455
ca58c3af 1456 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1457 if (!(reg & BMCR_RESET)) {
1458 udelay(20);
1459 break;
1460 }
1461 }
1462 if (i == PHY_RESET_MAX_WAIT) {
1463 return -EBUSY;
1464 }
1465 return 0;
1466}
1467
1468static u32
1469bnx2_phy_get_pause_adv(struct bnx2 *bp)
1470{
1471 u32 adv = 0;
1472
1473 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1474 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1475
583c28e5 1476 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1477 adv = ADVERTISE_1000XPAUSE;
1478 }
1479 else {
1480 adv = ADVERTISE_PAUSE_CAP;
1481 }
1482 }
1483 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1484 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1485 adv = ADVERTISE_1000XPSE_ASYM;
1486 }
1487 else {
1488 adv = ADVERTISE_PAUSE_ASYM;
1489 }
1490 }
1491 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1492 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1493 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1494 }
1495 else {
1496 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1497 }
1498 }
1499 return adv;
1500}
1501
a2f13890 1502static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1503
b6016b76 1504static int
0d8a6571 1505bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1506__releases(&bp->phy_lock)
1507__acquires(&bp->phy_lock)
0d8a6571
MC
1508{
1509 u32 speed_arg = 0, pause_adv;
1510
1511 pause_adv = bnx2_phy_get_pause_adv(bp);
1512
1513 if (bp->autoneg & AUTONEG_SPEED) {
1514 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1515 if (bp->advertising & ADVERTISED_10baseT_Half)
1516 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1517 if (bp->advertising & ADVERTISED_10baseT_Full)
1518 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1519 if (bp->advertising & ADVERTISED_100baseT_Half)
1520 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1521 if (bp->advertising & ADVERTISED_100baseT_Full)
1522 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1523 if (bp->advertising & ADVERTISED_1000baseT_Full)
1524 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1525 if (bp->advertising & ADVERTISED_2500baseX_Full)
1526 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1527 } else {
1528 if (bp->req_line_speed == SPEED_2500)
1529 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1530 else if (bp->req_line_speed == SPEED_1000)
1531 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1532 else if (bp->req_line_speed == SPEED_100) {
1533 if (bp->req_duplex == DUPLEX_FULL)
1534 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1535 else
1536 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1537 } else if (bp->req_line_speed == SPEED_10) {
1538 if (bp->req_duplex == DUPLEX_FULL)
1539 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1540 else
1541 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1542 }
1543 }
1544
1545 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1546 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
c26736ec 1547 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
0d8a6571
MC
1548 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1549
1550 if (port == PORT_TP)
1551 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1552 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1553
2726d6e1 1554 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
0d8a6571
MC
1555
1556 spin_unlock_bh(&bp->phy_lock);
a2f13890 1557 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
0d8a6571
MC
1558 spin_lock_bh(&bp->phy_lock);
1559
1560 return 0;
1561}
1562
/* Apply the requested link settings to a SerDes PHY: either force a
 * fixed speed/duplex or (re)start autonegotiation.  Called with
 * bp->phy_lock held; may drop and re-take it while waiting for the
 * link partner to notice a forced link-down.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
    u32 adv, bmcr;
    u32 new_adv = 0;

    /* Remote-PHY setups go through the firmware instead. */
    if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
        return (bnx2_setup_remote_phy(bp, port));

    if (!(bp->autoneg & AUTONEG_SPEED)) {
        /* Forced-speed path. */
        u32 new_bmcr;
        int force_link_down = 0;

        /* Toggling 2.5G advertisement requires bouncing the link. */
        if (bp->req_line_speed == SPEED_2500) {
            if (!bnx2_test_and_enable_2g5(bp))
                force_link_down = 1;
        } else if (bp->req_line_speed == SPEED_1000) {
            if (bnx2_test_and_disable_2g5(bp))
                force_link_down = 1;
        }
        bnx2_read_phy(bp, bp->mii_adv, &adv);
        adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
        new_bmcr = bmcr & ~BMCR_ANENABLE;
        new_bmcr |= BMCR_SPEED1000;

        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
            if (bp->req_line_speed == SPEED_2500)
                bnx2_enable_forced_2g5(bp);
            else if (bp->req_line_speed == SPEED_1000) {
                bnx2_disable_forced_2g5(bp);
                /* NOTE(review): 0x2000 looks like a chip-specific
                 * force bit in BMCR — confirm against the 5709 docs. */
                new_bmcr &= ~0x2000;
            }

        } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
            if (bp->req_line_speed == SPEED_2500)
                new_bmcr |= BCM5708S_BMCR_FORCE_2500;
            else
                new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
        }

        if (bp->req_duplex == DUPLEX_FULL) {
            adv |= ADVERTISE_1000XFULL;
            new_bmcr |= BMCR_FULLDPLX;
        }
        else {
            adv |= ADVERTISE_1000XHALF;
            new_bmcr &= ~BMCR_FULLDPLX;
        }
        if ((new_bmcr != bmcr) || (force_link_down)) {
            /* Force a link down visible on the other side */
            if (bp->link_up) {
                bnx2_write_phy(bp, bp->mii_adv, adv &
                    ~(ADVERTISE_1000XFULL |
                      ADVERTISE_1000XHALF));
                bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
                    BMCR_ANRESTART | BMCR_ANENABLE);

                bp->link_up = 0;
                netif_carrier_off(bp->dev);
                bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
                bnx2_report_link(bp);
            }
            bnx2_write_phy(bp, bp->mii_adv, adv);
            bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
        } else {
            /* Nothing changed; just re-resolve flow control. */
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
        return 0;
    }

    /* Autoneg path. */
    bnx2_test_and_enable_2g5(bp);

    if (bp->advertising & ADVERTISED_1000baseT_Full)
        new_adv |= ADVERTISE_1000XFULL;

    new_adv |= bnx2_phy_get_pause_adv(bp);

    bnx2_read_phy(bp, bp->mii_adv, &adv);
    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    bp->serdes_an_pending = 0;
    if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
        /* Force a link down visible on the other side */
        if (bp->link_up) {
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            msleep(20);
            spin_lock_bh(&bp->phy_lock);
        }

        bnx2_write_phy(bp, bp->mii_adv, new_adv);
        bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
            BMCR_ANENABLE);
        /* Speed up link-up time when the link partner
         * does not autonegotiate which is very common
         * in blade servers. Some blade servers use
         * IPMI for keyboard input and it's important
         * to minimize link disruptions. Autoneg. involves
         * exchanging base pages plus 3 next pages and
         * normally completes in about 120 msec.
         */
        bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
        bp->serdes_an_pending = 1;
        mod_timer(&bp->timer, jiffies + bp->current_interval);
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }

    return 0;
}
1679
/* All fibre speeds this adapter can advertise; includes 2.5G only when
 * the PHY is 2.5G capable.  NOTE: expands bp from the enclosing scope. */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds the adapter can advertise (ethtool ADVERTISED_*). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* MII advertisement register bits for all 10/100 abilities. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

/* MII 1000BASE-T control register bits for all gigabit abilities. */
#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1694
0d8a6571
MC
1695static void
1696bnx2_set_default_remote_link(struct bnx2 *bp)
1697{
1698 u32 link;
1699
1700 if (bp->phy_port == PORT_TP)
2726d6e1 1701 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
0d8a6571 1702 else
2726d6e1 1703 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
0d8a6571
MC
1704
1705 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1706 bp->req_line_speed = 0;
1707 bp->autoneg |= AUTONEG_SPEED;
1708 bp->advertising = ADVERTISED_Autoneg;
1709 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1710 bp->advertising |= ADVERTISED_10baseT_Half;
1711 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1712 bp->advertising |= ADVERTISED_10baseT_Full;
1713 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1714 bp->advertising |= ADVERTISED_100baseT_Half;
1715 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1716 bp->advertising |= ADVERTISED_100baseT_Full;
1717 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1718 bp->advertising |= ADVERTISED_1000baseT_Full;
1719 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1720 bp->advertising |= ADVERTISED_2500baseX_Full;
1721 } else {
1722 bp->autoneg = 0;
1723 bp->advertising = 0;
1724 bp->req_duplex = DUPLEX_FULL;
1725 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1726 bp->req_line_speed = SPEED_10;
1727 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1728 bp->req_duplex = DUPLEX_HALF;
1729 }
1730 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1731 bp->req_line_speed = SPEED_100;
1732 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1733 bp->req_duplex = DUPLEX_HALF;
1734 }
1735 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1736 bp->req_line_speed = SPEED_1000;
1737 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1738 bp->req_line_speed = SPEED_2500;
1739 }
1740}
1741
deaf391b
MC
1742static void
1743bnx2_set_default_link(struct bnx2 *bp)
1744{
ab59859d
HH
1745 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1746 bnx2_set_default_remote_link(bp);
1747 return;
1748 }
0d8a6571 1749
deaf391b
MC
1750 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1751 bp->req_line_speed = 0;
583c28e5 1752 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
deaf391b
MC
1753 u32 reg;
1754
1755 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1756
2726d6e1 1757 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
deaf391b
MC
1758 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1759 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1760 bp->autoneg = 0;
1761 bp->req_line_speed = bp->line_speed = SPEED_1000;
1762 bp->req_duplex = DUPLEX_FULL;
1763 }
1764 } else
1765 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1766}
1767
df149d70
MC
1768static void
1769bnx2_send_heart_beat(struct bnx2 *bp)
1770{
1771 u32 msg;
1772 u32 addr;
1773
1774 spin_lock(&bp->indirect_lock);
1775 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1776 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1777 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1778 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1779 spin_unlock(&bp->indirect_lock);
1780}
1781
0d8a6571
MC
1782static void
1783bnx2_remote_phy_event(struct bnx2 *bp)
1784{
1785 u32 msg;
1786 u8 link_up = bp->link_up;
1787 u8 old_port;
1788
2726d6e1 1789 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
0d8a6571 1790
df149d70
MC
1791 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1792 bnx2_send_heart_beat(bp);
1793
1794 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1795
0d8a6571
MC
1796 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1797 bp->link_up = 0;
1798 else {
1799 u32 speed;
1800
1801 bp->link_up = 1;
1802 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1803 bp->duplex = DUPLEX_FULL;
1804 switch (speed) {
1805 case BNX2_LINK_STATUS_10HALF:
1806 bp->duplex = DUPLEX_HALF;
1807 case BNX2_LINK_STATUS_10FULL:
1808 bp->line_speed = SPEED_10;
1809 break;
1810 case BNX2_LINK_STATUS_100HALF:
1811 bp->duplex = DUPLEX_HALF;
1812 case BNX2_LINK_STATUS_100BASE_T4:
1813 case BNX2_LINK_STATUS_100FULL:
1814 bp->line_speed = SPEED_100;
1815 break;
1816 case BNX2_LINK_STATUS_1000HALF:
1817 bp->duplex = DUPLEX_HALF;
1818 case BNX2_LINK_STATUS_1000FULL:
1819 bp->line_speed = SPEED_1000;
1820 break;
1821 case BNX2_LINK_STATUS_2500HALF:
1822 bp->duplex = DUPLEX_HALF;
1823 case BNX2_LINK_STATUS_2500FULL:
1824 bp->line_speed = SPEED_2500;
1825 break;
1826 default:
1827 bp->line_speed = 0;
1828 break;
1829 }
1830
0d8a6571
MC
1831 bp->flow_ctrl = 0;
1832 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1833 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1834 if (bp->duplex == DUPLEX_FULL)
1835 bp->flow_ctrl = bp->req_flow_ctrl;
1836 } else {
1837 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1838 bp->flow_ctrl |= FLOW_CTRL_TX;
1839 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1840 bp->flow_ctrl |= FLOW_CTRL_RX;
1841 }
1842
1843 old_port = bp->phy_port;
1844 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1845 bp->phy_port = PORT_FIBRE;
1846 else
1847 bp->phy_port = PORT_TP;
1848
1849 if (old_port != bp->phy_port)
1850 bnx2_set_default_link(bp);
1851
0d8a6571
MC
1852 }
1853 if (bp->link_up != link_up)
1854 bnx2_report_link(bp);
1855
1856 bnx2_set_mac_link(bp);
1857}
1858
1859static int
1860bnx2_set_remote_link(struct bnx2 *bp)
1861{
1862 u32 evt_code;
1863
2726d6e1 1864 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
1865 switch (evt_code) {
1866 case BNX2_FW_EVT_CODE_LINK_EVENT:
1867 bnx2_remote_phy_event(bp);
1868 break;
1869 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1870 default:
df149d70 1871 bnx2_send_heart_beat(bp);
0d8a6571
MC
1872 break;
1873 }
1874 return 0;
1875}
1876
/* Apply the requested link settings to a copper PHY: reprogram the
 * advertisement registers and restart autoneg, or force speed/duplex.
 * Called with bp->phy_lock held; may drop and re-take it while waiting
 * for a forced link-down to complete.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
    u32 bmcr;
    u32 new_bmcr;

    bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

    if (bp->autoneg & AUTONEG_SPEED) {
        /* Autoneg path: rebuild both advertisement registers from
         * bp->advertising and compare with what is in the PHY. */
        u32 adv_reg, adv1000_reg;
        u32 new_adv_reg = 0;
        u32 new_adv1000_reg = 0;

        bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
        adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
            ADVERTISE_PAUSE_ASYM);

        bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
        adv1000_reg &= PHY_ALL_1000_SPEED;

        if (bp->advertising & ADVERTISED_10baseT_Half)
            new_adv_reg |= ADVERTISE_10HALF;
        if (bp->advertising & ADVERTISED_10baseT_Full)
            new_adv_reg |= ADVERTISE_10FULL;
        if (bp->advertising & ADVERTISED_100baseT_Half)
            new_adv_reg |= ADVERTISE_100HALF;
        if (bp->advertising & ADVERTISED_100baseT_Full)
            new_adv_reg |= ADVERTISE_100FULL;
        if (bp->advertising & ADVERTISED_1000baseT_Full)
            new_adv1000_reg |= ADVERTISE_1000FULL;

        new_adv_reg |= ADVERTISE_CSMA;

        new_adv_reg |= bnx2_phy_get_pause_adv(bp);

        if ((adv1000_reg != new_adv1000_reg) ||
            (adv_reg != new_adv_reg) ||
            ((bmcr & BMCR_ANENABLE) == 0)) {

            /* Something changed (or autoneg was off): reprogram
             * and restart autonegotiation. */
            bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
            bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
                BMCR_ANENABLE);
        }
        else if (bp->link_up) {
            /* Flow ctrl may have changed from auto to forced */
            /* or vice-versa. */

            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
        return 0;
    }

    /* Forced-speed path: build the target BMCR. */
    new_bmcr = 0;
    if (bp->req_line_speed == SPEED_100) {
        new_bmcr |= BMCR_SPEED100;
    }
    if (bp->req_duplex == DUPLEX_FULL) {
        new_bmcr |= BMCR_FULLDPLX;
    }
    if (new_bmcr != bmcr) {
        u32 bmsr;

        /* Double-read to clear the latched link bit. */
        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

        if (bmsr & BMSR_LSTATUS) {
            /* Force link down */
            bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
            spin_unlock_bh(&bp->phy_lock);
            msleep(50);
            spin_lock_bh(&bp->phy_lock);

            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
            bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
        }

        bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

        /* Normally, the new speed is setup after the link has
         * gone down and up again. In some cases, link will not go
         * down so we need to set up the new speed here.
         */
        if (bmsr & BMSR_LSTATUS) {
            bp->line_speed = bp->req_line_speed;
            bp->duplex = bp->req_duplex;
            bnx2_resolve_flow_ctrl(bp);
            bnx2_set_mac_link(bp);
        }
    } else {
        bnx2_resolve_flow_ctrl(bp);
        bnx2_set_mac_link(bp);
    }
    return 0;
}
1975
1976static int
0d8a6571 1977bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1978__releases(&bp->phy_lock)
1979__acquires(&bp->phy_lock)
b6016b76
MC
1980{
1981 if (bp->loopback == MAC_LOOPBACK)
1982 return 0;
1983
583c28e5 1984 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 1985 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1986 }
1987 else {
1988 return (bnx2_setup_copper_phy(bp));
1989 }
1990}
1991
/* One-time init of the 5709 SerDes PHY: point the mii_* register
 * offsets at the IEEE-compatible shadow (base + 0x10), optionally
 * reset the PHY, and program fiber mode, 2.5G advertisement, and
 * next-page/CL73 BAM autoneg options.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
    u32 val;

    /* The 5709 exposes the standard MII registers at an offset. */
    bp->mii_bmcr = MII_BMCR + 0x10;
    bp->mii_bmsr = MII_BMSR + 0x10;
    bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
    bp->mii_adv = MII_ADVERTISE + 0x10;
    bp->mii_lpa = MII_LPA + 0x10;
    bp->mii_up1 = MII_BNX2_OVER1G_UP1;

    /* Map the AN MMD through the address-expansion registers. */
    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
    bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
    if (reset_phy)
        bnx2_reset_phy(bp);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

    /* Fixed fiber mode; disable auto media detection. */
    bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
    val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
    val |= MII_BNX2_SD_1000XCTL1_FIBER;
    bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

    /* Advertise 2.5G only when the PHY supports it. */
    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
    bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
    if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
        val |= BCM5708S_UP1_2G5;
    else
        val &= ~BCM5708S_UP1_2G5;
    bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

    /* Enable BAM and teton-2 next-page exchange. */
    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
    bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
    val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
    bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

    /* Clause 73 BAM autoneg options. */
    val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
        MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
    bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

    /* Leave the default register block selected. */
    bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

    return 0;
}
2041
/* One-time init of the 5708 SerDes PHY: optional reset, fiber mode with
 * auto-detect, optional 2.5G advertisement, plus board-specific tx
 * amplitude tweaks taken from NVRAM config.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
    u32 val;

    if (reset_phy)
        bnx2_reset_phy(bp);

    bp->mii_up1 = BCM5708S_UP1;

    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
    bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
    bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

    /* Fiber mode with auto media detection. */
    bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
    val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
    bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

    bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
    val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
    bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

    if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
        bnx2_read_phy(bp, BCM5708S_UP1, &val);
        val |= BCM5708S_UP1_2G5;
        bnx2_write_phy(bp, BCM5708S_UP1, val);
    }

    if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
        (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
        (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
        /* increase tx signal amplitude */
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
            BCM5708S_BLK_ADDR_TX_MISC);
        bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
        val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
        bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
        bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
    }

    /* Apply an NVRAM-provided TX control value on backplane boards. */
    val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
        BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

    if (val) {
        u32 is_backplane;

        is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
        if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                BCM5708S_BLK_ADDR_TX_MISC);
            bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
            bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
                BCM5708S_BLK_ADDR_DIG);
        }
    }
    return 0;
}
2099
/* One-time initialization of the 5706 SerDes PHY.
 *
 * Optionally resets the PHY, clears the parallel-detect flag, and
 * programs vendor-specific shadow registers (0x18 / 0x1c) differently
 * for jumbo frames (MTU > 1500) vs. standard frames.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2137
/* One-time initialization of the copper (twisted-pair) PHY.
 *
 * Optionally resets the PHY, applies the CRC and early-DAC workarounds
 * when the corresponding phy_flags are set, toggles the extended packet
 * length bit based on MTU, and enables ethernet@wirespeed downshift.
 * Always returns 0.  The 0x15/0x17/0x18/0x1c registers are
 * vendor-specific shadow/expansion registers.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Board needs the CRC workaround sequence. */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear bit 8 of DSP expansion register 0x8 to disable
		 * early DAC.
		 */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2189
2190
/* Top-level PHY initialization.
 *
 * Selects link-ready interrupt mode, sets up the default MII register
 * map, reads the PHY ID, and dispatches to the chip-specific SerDes or
 * copper init routine before calling bnx2_setup_phy().  For remote-PHY
 * capable devices the PHY access is skipped entirely.
 *
 * Called with phy_lock held; the sparse annotations document that the
 * lock may be dropped and re-acquired inside (via bnx2_setup_phy —
 * NOTE(review): confirm which callee drops it).
 * Returns 0 on success or the error from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default MII register addresses; chip-specific init may
	 * override these (e.g. bp->mii_up1 on the 5708).
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Remote PHY is managed by firmware; no direct PHY access. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2236
2237static int
2238bnx2_set_mac_loopback(struct bnx2 *bp)
2239{
2240 u32 mac_mode;
2241
2242 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2243 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2244 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2245 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2246 bp->link_up = 1;
2247 return 0;
2248}
2249
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at 1000 Mb/s full duplex for self-test.
 *
 * Writes BMCR under phy_lock, waits up to ~1 second (10 x 100 ms) for
 * the link to come up, then clears loopback/duplex/force bits in the
 * EMAC mode register and selects the GMII port mode.
 * Returns 0 on success or the error from the PHY write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for link-up; bnx2_test_link() returns 0 when up. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2281
/* Send a message to the bootcode firmware through the shared-memory
 * mailbox and optionally wait for the acknowledgement.
 *
 * @msg_data: message code/data; a driver sequence number is OR'ed in.
 * @ack:      if zero, fire-and-forget (returns 0 immediately).
 * @silent:   suppress the timeout printk.
 *
 * Returns 0 on success, -EBUSY if the firmware never acked within
 * BNX2_FW_ACK_TIME_OUT_MS (the timeout is also reported back to the
 * firmware), or -EIO if the firmware acked with a bad status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	/* Tag the message with a new sequence number so the ack can
	 * be matched below.
	 */
	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are allowed to time out quietly. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2327
/* Initialize the 5709's host-memory-based context.
 *
 * Kicks off the context memory init, waits for it to complete, then
 * programs every pre-allocated, zeroed context page into the chip's
 * host page table, polling each write for completion.
 * Returns 0 on success, -EBUSY on a hardware poll timeout, or -ENOMEM
 * if a context page was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait (up to 10 x 2us) for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Write the page's DMA address into the host page table
		 * and trigger the table write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Poll for the WRITE_REQ bit to clear. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2375
/* Initialize on-chip context memory for pre-5709 chips.
 *
 * Walks all 96 context IDs (high to low), maps each virtual CID to a
 * physical CID (the 5706 A0 needs a remap for CIDs with bit 3 set),
 * and zeroes every physical context page through the context window.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			vcid_addr = GET_PCID_ADDR(vcid);
			/* 5706 A0 remaps CIDs with bit 3 set. */
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context entry can span multiple physical pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2418
/* Work around bad RX buffer memory blocks.
 *
 * Drains the chip's internal mbuf allocator, recording the good
 * buffers (bit 9 clear) in a temporary array, then frees only the
 * good ones back — effectively leaking the bad blocks inside the chip
 * so they are never handed out again.
 * Returns 0 on success or -ENOMEM if the scratch array can't be
 * allocated.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2470
2471static void
5fcaed01 2472bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2473{
2474 u32 val;
b6016b76
MC
2475
2476 val = (mac_addr[0] << 8) | mac_addr[1];
2477
5fcaed01 2478 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2479
6aa20a22 2480 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2481 (mac_addr[4] << 8) | mac_addr[5];
2482
5fcaed01 2483 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2484}
2485
/* Allocate and DMA-map one page for the RX page (jumbo) ring slot
 * @index, and write its bus address into the matching descriptor.
 * Returns 0 on success, -ENOMEM if the page allocation fails, or
 * -EIO if the DMA mapping fails (the page is freed).
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	pci_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2510
2511static void
bb4f98ab 2512bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2513{
bb4f98ab 2514 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2515 struct page *page = rx_pg->page;
2516
2517 if (!page)
2518 return;
2519
2520 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2521 PCI_DMA_FROMDEVICE);
2522
2523 __free_page(page);
2524 rx_pg->page = NULL;
2525}
2526
/* Allocate, align, and DMA-map a new skb for RX ring slot @index and
 * publish its bus address in the matching descriptor; advances the
 * ring's rx_prod_bseq byte counter.
 * Returns 0 on success, -ENOMEM if skb allocation fails, or -EIO if
 * the DMA mapping fails (the skb is freed).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2561
da3e4fbe 2562static int
35efa7c1 2563bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2564{
43e80b89 2565 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76 2566 u32 new_link_state, old_link_state;
da3e4fbe 2567 int is_set = 1;
b6016b76 2568
da3e4fbe
MC
2569 new_link_state = sblk->status_attn_bits & event;
2570 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2571 if (new_link_state != old_link_state) {
da3e4fbe
MC
2572 if (new_link_state)
2573 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2574 else
2575 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2576 } else
2577 is_set = 0;
2578
2579 return is_set;
2580}
2581
/* Handle PHY attention events from NAPI poll context.
 *
 * Under phy_lock, consumes the link-state event (updating the link via
 * bnx2_set_link) and the timer-abort event (handled by
 * bnx2_set_remote_link).
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2595
ead7270b 2596static inline u16
35efa7c1 2597bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2598{
2599 u16 cons;
2600
43e80b89
MC
2601 /* Tell compiler that status block fields can change. */
2602 barrier();
2603 cons = *bnapi->hw_tx_cons_ptr;
581daf7e 2604 barrier();
ead7270b
MC
2605 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2606 cons++;
2607 return cons;
2608}
2609
/* Reclaim completed TX descriptors for this NAPI instance.
 *
 * Walks the TX ring from the software consumer to the hardware
 * consumer, unmapping and freeing each completed skb, up to @budget
 * packets.  TSO packets may complete in partial BD groups, so a packet
 * is only reclaimed once all of its BDs are past the hardware consumer.
 * Wakes the TX queue if it was stopped and enough descriptors are now
 * free.  Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			/* Account for the next-page BD if the packet
			 * wraps past the end of a ring page.
			 */
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed-distance compare handles index wrap. */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Refresh hw_cons to pick up new completions. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2692
/* Recycle @count pages from the RX page ring's consumer side back to
 * the producer side without re-allocating or re-mapping them.
 *
 * If @skb is non-NULL, its last page frag could not be replaced with a
 * fresh page: that page is detached from the skb, put back on the
 * consumer slot, and the skb is freed before recycling begins.
 * Updates rxr->rx_pg_prod / rx_pg_cons.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move the page and its DMA address from the consumer
		 * slot to the producer slot.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2748
/* Recycle an RX buffer from consumer slot @cons to producer slot @prod
 * instead of allocating a replacement.
 *
 * Syncs the buffer header back to the device, moves the skb and DMA
 * mapping to the producer slot (unless cons == prod, where nothing
 * needs to move), copies the descriptor address, and advances
 * rx_prod_bseq.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2778
/* Finish receiving one packet into @skb, replenishing the rings.
 *
 * First allocates a replacement skb for the ring slot; on failure the
 * original skb (and any pages a split packet would have used) is
 * recycled and an error is returned.  Otherwise the skb is unmapped
 * and, for split packets (hdr_len != 0), the payload beyond the header
 * is attached as page frags pulled from the page ring, each page being
 * replaced as it is consumed.  The last 4 bytes (presumably the frame
 * CRC — NOTE(review): confirm) are trimmed from the final fragment.
 *
 * @ring_idx packs the consumer index in the high 16 bits and the
 * producer index in the low 16 bits.
 * Returns 0 on success or a negative errno on allocation failure.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: all data is already in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			/* If only the trailing 4 bytes (or less) remain,
			 * trim them off instead of attaching a new frag.
			 */
			if (unlikely(frag_len <= 4)) {
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
2877
c09c2627 2878static inline u16
35efa7c1 2879bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 2880{
bb4f98ab
MC
2881 u16 cons;
2882
43e80b89
MC
2883 /* Tell compiler that status block fields can change. */
2884 barrier();
2885 cons = *bnapi->hw_rx_cons_ptr;
581daf7e 2886 barrier();
c09c2627
MC
2887 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2888 cons++;
2889 return cons;
2890}
2891
/* NAPI RX poll handler: process up to @budget received packets.
 *
 * Walks the RX ring from the software consumer to the hardware
 * consumer.  For each packet it reads the L2 frame header for length /
 * status, drops errored frames (recycling the buffer and any pages),
 * either copies small packets (<= rx_copy_thresh) into a fresh skb or
 * hands the buffer off via bnx2_rx_skb(), handles VLAN tags (hardware
 * accel when a vlgrp is registered, otherwise re-inserting the tag
 * into the frame), sets the checksum state from the hardware flags,
 * and passes the skb up the stack.  Finally publishes the new
 * producer/consumer indices to the hardware.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header for the CPU; the rest is
		 * either copied or attached as pages.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Drop errored frames, recycling buffer and pages. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* +6 leaves room to re-insert a VLAN tag. */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group: re-insert the stripped
				 * tag into the frame data.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3067
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI poll re-enables them. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3090
/* One-shot MSI ISR: like bnx2_msi() but without the explicit mask
 * write — the hardware disarms the interrupt on delivery in one-shot
 * mode, so only the disabled check and NAPI scheduling remain.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3107
/* INTx (shared line) ISR.
 *
 * Verifies the interrupt is really ours (status index advanced or the
 * INTA bit set in PCICFG_MISC_STATUS), masks further interrupts, and
 * schedules NAPI.  Returns IRQ_NONE when the line fired for another
 * device sharing it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3146
f4e418f7 3147static inline int
43e80b89 3148bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3149{
35e9010b 3150 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3151 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3152
bb4f98ab 3153 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3154 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3155 return 1;
43e80b89
MC
3156 return 0;
3157}
3158
3159#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3160 STATUS_ATTN_BITS_TIMER_ABORT)
3161
3162static inline int
3163bnx2_has_work(struct bnx2_napi *bnapi)
3164{
3165 struct status_block *sblk = bnapi->status_blk.msi;
3166
3167 if (bnx2_has_fast_work(bnapi))
3168 return 1;
f4e418f7 3169
da3e4fbe
MC
3170 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3171 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
3172 return 1;
3173
3174 return 0;
3175}
3176
efba0180
MC
/* Periodic check for a "lost" MSI: if there is pending work but the
 * status index has not moved since the last idle check, assume the MSI
 * was missed and regenerate it by toggling the MSI enable bit, then
 * service the interrupt directly.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle MSI enable off/on to retrigger, then
			 * call the MSI handler by hand.
			 */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next idle check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3198
/* Service pending link/attention events from the NAPI poll loop. */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	/* Raised-vs-acked mismatch means an unhandled attention event. */
	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3218
3219static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3220 int work_done, int budget)
3221{
3222 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3223 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3224
35e9010b 3225 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3226 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3227
bb4f98ab 3228 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3229 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3230
6f535763
DM
3231 return work_done;
3232}
3233
f0ea2e63
MC
/* NAPI poll routine for MSI-X vectors: no link handling here (that is
 * done by the base vector's bnx2_poll).
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: re-enable this vector's interrupt and tell
			 * the chip how far we have processed.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3260
6f535763
DM
/* Main NAPI poll routine (INTx/MSI, and base MSI-X vector): handles
 * link events plus RX/TX work, then re-enables interrupts when idle.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack re-enables. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the mask bit first, then again
			 * without it, to avoid losing a status update that
			 * races with the re-enable.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3305
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Programs the receive filtering hardware (promiscuous mode, multicast
 * hash registers, unicast match filters, VLAN tag keeping) to match the
 * net_device's current flags and address lists.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the current mode with promisc/VLAN-keep cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Only strip VLAN tags in hw when a vlan group is registered. */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash with all ones. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each multicast address into one bit of the
		 * 256-bit filter (8 x 32-bit registers).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
		/* Too many secondary unicast addresses for the match
		 * filters: fall back to promiscuous mode.
		 */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc_list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Apply the sort mode: clear, program, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3401
57579f76
MC
3402static int __devinit
3403check_fw_section(const struct firmware *fw,
3404 const struct bnx2_fw_file_section *section,
3405 u32 alignment, bool non_empty)
3406{
3407 u32 offset = be32_to_cpu(section->offset);
3408 u32 len = be32_to_cpu(section->len);
3409
3410 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3411 return -EINVAL;
3412 if ((non_empty && len == 0) || len > fw->size - offset ||
3413 len & (alignment - 1))
3414 return -EINVAL;
3415 return 0;
3416}
3417
3418static int __devinit
3419check_mips_fw_entry(const struct firmware *fw,
3420 const struct bnx2_mips_fw_file_entry *entry)
3421{
3422 if (check_fw_section(fw, &entry->text, 4, true) ||
3423 check_fw_section(fw, &entry->data, 4, false) ||
3424 check_fw_section(fw, &entry->rodata, 4, false))
3425 return -EINVAL;
3426 return 0;
3427}
3428
3429static int __devinit
3430bnx2_request_firmware(struct bnx2 *bp)
b6016b76 3431{
57579f76 3432 const char *mips_fw_file, *rv2p_fw_file;
5ee1c326
BB
3433 const struct bnx2_mips_fw_file *mips_fw;
3434 const struct bnx2_rv2p_fw_file *rv2p_fw;
57579f76
MC
3435 int rc;
3436
3437 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3438 mips_fw_file = FW_MIPS_FILE_09;
3439 rv2p_fw_file = FW_RV2P_FILE_09;
3440 } else {
3441 mips_fw_file = FW_MIPS_FILE_06;
3442 rv2p_fw_file = FW_RV2P_FILE_06;
3443 }
3444
3445 rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
3446 if (rc) {
3447 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3448 mips_fw_file);
3449 return rc;
3450 }
3451
3452 rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
3453 if (rc) {
3454 printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
3455 rv2p_fw_file);
3456 return rc;
3457 }
5ee1c326
BB
3458 mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3459 rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3460 if (bp->mips_firmware->size < sizeof(*mips_fw) ||
3461 check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
3462 check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
3463 check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
3464 check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
3465 check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
57579f76
MC
3466 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3467 mips_fw_file);
3468 return -EINVAL;
3469 }
5ee1c326
BB
3470 if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
3471 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
3472 check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
57579f76
MC
3473 printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
3474 rv2p_fw_file);
3475 return -EINVAL;
3476 }
3477
3478 return 0;
3479}
3480
3481static u32
3482rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3483{
3484 switch (idx) {
3485 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3486 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3487 rv2p_code |= RV2P_BD_PAGE_SIZE;
3488 break;
3489 }
3490 return rv2p_code;
3491}
3492
/* Download one RV2P processor's firmware: stream the instruction pairs
 * into the instruction registers, apply up to 8 fixups, then reset the
 * processor (it is un-stalled later).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Select the command/address registers for this processor. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Each instruction is 8 bytes: write high word, low word, then
	 * commit at instruction index i/8.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Apply fixups: each fixup names a word location whose containing
	 * instruction is rewritten via rv2p_fw_fixup().
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3552
af3ee519 3553static int
57579f76
MC
3554load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3555 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3556{
57579f76
MC
3557 u32 addr, len, file_offset;
3558 __be32 *data;
b6016b76
MC
3559 u32 offset;
3560 u32 val;
3561
3562 /* Halt the CPU. */
2726d6e1 3563 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3564 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3565 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3566 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3567
3568 /* Load the Text area. */
57579f76
MC
3569 addr = be32_to_cpu(fw_entry->text.addr);
3570 len = be32_to_cpu(fw_entry->text.len);
3571 file_offset = be32_to_cpu(fw_entry->text.offset);
3572 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3573
57579f76
MC
3574 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3575 if (len) {
b6016b76
MC
3576 int j;
3577
57579f76
MC
3578 for (j = 0; j < (len / 4); j++, offset += 4)
3579 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3580 }
3581
57579f76
MC
3582 /* Load the Data area. */
3583 addr = be32_to_cpu(fw_entry->data.addr);
3584 len = be32_to_cpu(fw_entry->data.len);
3585 file_offset = be32_to_cpu(fw_entry->data.offset);
3586 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3587
57579f76
MC
3588 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3589 if (len) {
b6016b76
MC
3590 int j;
3591
57579f76
MC
3592 for (j = 0; j < (len / 4); j++, offset += 4)
3593 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3594 }
3595
3596 /* Load the Read-Only area. */
57579f76
MC
3597 addr = be32_to_cpu(fw_entry->rodata.addr);
3598 len = be32_to_cpu(fw_entry->rodata.len);
3599 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3600 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3601
3602 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3603 if (len) {
b6016b76
MC
3604 int j;
3605
57579f76
MC
3606 for (j = 0; j < (len / 4); j++, offset += 4)
3607 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3608 }
3609
3610 /* Clear the pre-fetch instruction. */
2726d6e1 3611 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3612
3613 val = be32_to_cpu(fw_entry->start_addr);
3614 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3615
3616 /* Start the CPU. */
2726d6e1 3617 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3618 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3619 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3620 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3621
3622 return 0;
b6016b76
MC
3623}
3624
fba9fe91 3625static int
b6016b76
MC
3626bnx2_init_cpus(struct bnx2 *bp)
3627{
57579f76
MC
3628 const struct bnx2_mips_fw_file *mips_fw =
3629 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3630 const struct bnx2_rv2p_fw_file *rv2p_fw =
3631 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3632 int rc;
b6016b76
MC
3633
3634 /* Initialize the RV2P processor. */
57579f76
MC
3635 load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3636 load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
b6016b76
MC
3637
3638 /* Initialize the RX Processor. */
57579f76 3639 rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
fba9fe91
MC
3640 if (rc)
3641 goto init_cpu_err;
3642
b6016b76 3643 /* Initialize the TX Processor. */
57579f76 3644 rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
fba9fe91
MC
3645 if (rc)
3646 goto init_cpu_err;
3647
b6016b76 3648 /* Initialize the TX Patch-up Processor. */
57579f76 3649 rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
fba9fe91
MC
3650 if (rc)
3651 goto init_cpu_err;
3652
b6016b76 3653 /* Initialize the Completion Processor. */
57579f76 3654 rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
fba9fe91
MC
3655 if (rc)
3656 goto init_cpu_err;
3657
d43584c8 3658 /* Initialize the Command Processor. */
57579f76 3659 rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
b6016b76 3660
fba9fe91 3661init_cpu_err:
fba9fe91 3662 return rc;
b6016b76
MC
3663}
3664
/* Transition the device between PCI power states.
 *
 * D0: wake the chip (with the mandatory delay when leaving D3hot) and
 * clear the wake-on-LAN receive configuration.
 * D3hot: optionally configure Wake-on-LAN (force 10/100 autoneg on
 * copper, program magic/ACPI packet reception and multicast accept),
 * notify firmware, then program PMCSR.  Returns -EINVAL for any other
 * target state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Set state to D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg on copper so the
			 * link stays up at WoL-capable speeds, then restore
			 * the user's settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell firmware whether WoL is armed for this suspend. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Program PMCSR: 5706 A0/A1 only enter D3hot when WoL is
		 * armed; all other chips always do.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3802
3803static int
3804bnx2_acquire_nvram_lock(struct bnx2 *bp)
3805{
3806 u32 val;
3807 int j;
3808
3809 /* Request access to the flash interface. */
3810 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3811 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3812 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3813 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3814 break;
3815
3816 udelay(5);
3817 }
3818
3819 if (j >= NVRAM_TIMEOUT_COUNT)
3820 return -EBUSY;
3821
3822 return 0;
3823}
3824
3825static int
3826bnx2_release_nvram_lock(struct bnx2 *bp)
3827{
3828 int j;
3829 u32 val;
3830
3831 /* Relinquish nvram interface. */
3832 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3833
3834 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3835 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3836 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3837 break;
3838
3839 udelay(5);
3840 }
3841
3842 if (j >= NVRAM_TIMEOUT_COUNT)
3843 return -EBUSY;
3844
3845 return 0;
3846}
3847
3848
3849static int
3850bnx2_enable_nvram_write(struct bnx2 *bp)
3851{
3852 u32 val;
3853
3854 val = REG_RD(bp, BNX2_MISC_CFG);
3855 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3856
e30372c9 3857 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3858 int j;
3859
3860 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3861 REG_WR(bp, BNX2_NVM_COMMAND,
3862 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3863
3864 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3865 udelay(5);
3866
3867 val = REG_RD(bp, BNX2_NVM_COMMAND);
3868 if (val & BNX2_NVM_COMMAND_DONE)
3869 break;
3870 }
3871
3872 if (j >= NVRAM_TIMEOUT_COUNT)
3873 return -EBUSY;
3874 }
3875 return 0;
3876}
3877
3878static void
3879bnx2_disable_nvram_write(struct bnx2 *bp)
3880{
3881 u32 val;
3882
3883 val = REG_RD(bp, BNX2_MISC_CFG);
3884 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3885}
3886
3887
3888static void
3889bnx2_enable_nvram_access(struct bnx2 *bp)
3890{
3891 u32 val;
3892
3893 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3894 /* Enable both bits, even on read. */
6aa20a22 3895 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3896 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3897}
3898
3899static void
3900bnx2_disable_nvram_access(struct bnx2 *bp)
3901{
3902 u32 val;
3903
3904 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3905 /* Disable both bits, even after read. */
6aa20a22 3906 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3907 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3908 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3909}
3910
3911static int
3912bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3913{
3914 u32 cmd;
3915 int j;
3916
e30372c9 3917 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
3918 /* Buffered flash, no erase needed */
3919 return 0;
3920
3921 /* Build an erase command */
3922 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3923 BNX2_NVM_COMMAND_DOIT;
3924
3925 /* Need to clear DONE bit separately. */
3926 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3927
3928 /* Address of the NVRAM to read from. */
3929 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3930
3931 /* Issue an erase command. */
3932 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3933
3934 /* Wait for completion. */
3935 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3936 u32 val;
3937
3938 udelay(5);
3939
3940 val = REG_RD(bp, BNX2_NVM_COMMAND);
3941 if (val & BNX2_NVM_COMMAND_DONE)
3942 break;
3943 }
3944
3945 if (j >= NVRAM_TIMEOUT_COUNT)
3946 return -EBUSY;
3947
3948 return 0;
3949}
3950
/* Read one big-endian dword from NVRAM at the given byte offset into
 * ret_val (must hold 4 bytes).  cmd_flags carries FIRST/LAST framing
 * bits.  Caller must already hold NVRAM access.  Returns 0 or -EBUSY
 * on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Data register is native-endian; store it to the
			 * caller's buffer in big-endian byte order.
			 */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3994
3995
/* Write one dword (4 bytes from val, big-endian on flash) to NVRAM at
 * the given byte offset.  cmd_flags carries FIRST/LAST framing bits.
 * Caller must hold NVRAM access with writes enabled.  Returns 0 or
 * -EBUSY on timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4039
/* Detect the attached flash/EEPROM part and record it in
 * bp->flash_info, reconfiguring the NVRAM interface if the strapping
 * has not been applied yet.  Also determines the usable flash size.
 * Returns 0, -ENODEV for an unknown part, or a lock-acquisition error.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	struct flash_spec *flash;

	/* 5709 has a single known flash configuration. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects which strap field identifies the part. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size reported in shared hw config; fall back to the
	 * flash table's total size.
	 */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4122
4123static int
4124bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4125 int buf_size)
4126{
4127 int rc = 0;
4128 u32 cmd_flags, offset32, len32, extra;
4129
4130 if (buf_size == 0)
4131 return 0;
4132
4133 /* Request access to the flash interface. */
4134 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4135 return rc;
4136
4137 /* Enable access to flash interface */
4138 bnx2_enable_nvram_access(bp);
4139
4140 len32 = buf_size;
4141 offset32 = offset;
4142 extra = 0;
4143
4144 cmd_flags = 0;
4145
4146 if (offset32 & 3) {
4147 u8 buf[4];
4148 u32 pre_len;
4149
4150 offset32 &= ~3;
4151 pre_len = 4 - (offset & 3);
4152
4153 if (pre_len >= len32) {
4154 pre_len = len32;
4155 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4156 BNX2_NVM_COMMAND_LAST;
4157 }
4158 else {
4159 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4160 }
4161
4162 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4163
4164 if (rc)
4165 return rc;
4166
4167 memcpy(ret_buf, buf + (offset & 3), pre_len);
4168
4169 offset32 += 4;
4170 ret_buf += pre_len;
4171 len32 -= pre_len;
4172 }
4173 if (len32 & 3) {
4174 extra = 4 - (len32 & 3);
4175 len32 = (len32 + 4) & ~3;
4176 }
4177
4178 if (len32 == 4) {
4179 u8 buf[4];
4180
4181 if (cmd_flags)
4182 cmd_flags = BNX2_NVM_COMMAND_LAST;
4183 else
4184 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4185 BNX2_NVM_COMMAND_LAST;
4186
4187 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4188
4189 memcpy(ret_buf, buf, 4 - extra);
4190 }
4191 else if (len32 > 0) {
4192 u8 buf[4];
4193
4194 /* Read the first word. */
4195 if (cmd_flags)
4196 cmd_flags = 0;
4197 else
4198 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4199
4200 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4201
4202 /* Advance to the next dword. */
4203 offset32 += 4;
4204 ret_buf += 4;
4205 len32 -= 4;
4206
4207 while (len32 > 4 && rc == 0) {
4208 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4209
4210 /* Advance to the next dword. */
4211 offset32 += 4;
4212 ret_buf += 4;
4213 len32 -= 4;
4214 }
4215
4216 if (rc)
4217 return rc;
4218
4219 cmd_flags = BNX2_NVM_COMMAND_LAST;
4220 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4221
4222 memcpy(ret_buf, buf, 4 - extra);
4223 }
4224
4225 /* Disable access to flash interface */
4226 bnx2_disable_nvram_access(bp);
4227
4228 bnx2_release_nvram_lock(bp);
4229
4230 return rc;
4231}
4232
/* bnx2_nvram_write - write an arbitrary byte range to the NVRAM/flash.
 *
 * @bp:       device context
 * @offset:   byte offset into NVRAM (need not be dword aligned)
 * @data_buf: source data
 * @buf_size: number of bytes to write
 *
 * Unaligned head/tail bytes are handled read-modify-write style: the
 * surrounding dwords are read first and merged with the caller's data
 * in a kmalloc'd bounce buffer.  For non-buffered flash parts
 * (!BNX2_NV_BUFFERED) each page touched is read in full, erased, and
 * rewritten with the untouched bytes restored around the new data.
 * The NVRAM lock is acquired and released once per page.
 *
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: fetch the dword containing the first
		 * bytes so they can be preserved. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: fetch the last dword for the same reason. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build a dword-aligned image: preserved head bytes,
		 * caller data, preserved tail bytes. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer holding one whole flash page for the
		 * read/erase/rewrite cycle.  264 bytes presumably matches
		 * the largest supported page_size — TODO confirm against
		 * the flash table. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both buffers can be freed
	 * unconditionally. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4412
/* bnx2_init_fw_cap - probe the bootcode capability mailbox and negotiate
 * optional features.
 *
 * Clears the remote-PHY and keep-VLAN flags, then re-derives them:
 * keep-VLAN defaults to on unless ASF is enabled, and is forced on if
 * the firmware advertises BNX2_FW_CAP_CAN_KEEP_VLAN.  If the firmware
 * capability word carries no valid signature, the defaults stand.  For
 * SERDES devices whose firmware advertises remote-PHY capability, the
 * current phy_port (fibre vs. copper) is read from the shared-memory
 * link status.  Each accepted capability is acked back through
 * BNX2_DRV_ACK_CAP_MB, but only while the interface is running.
 *
 * Caller context: callers in this file hold bp->phy_lock around this
 * (see bnx2_reset_chip) — presumably required for phy_flags/phy_port
 * consistency; confirm before adding new call sites.
 */
static void
bnx2_init_fw_cap(struct bnx2 *bp)
{
	u32 val, sig = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
	bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;

	if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;

	val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
		bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
		sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
	}

	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
		u32 link;

		bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;

		/* Derive the reported port type from the firmware's view
		 * of the link. */
		link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
		if (link & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
		       BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
	}

	if (netif_running(bp->dev) && sig)
		bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
}
4452
/* bnx2_setup_msix_tbl - map the MSI-X table and PBA through the GRC windows.
 *
 * Switches the GRC window addressing to separate-window mode, then
 * points window 2 at the MSI-X vector table and window 3 at the MSI-X
 * pending-bit array so both are reachable through the register BAR.
 * Must be redone after every chip reset (see bnx2_reset_chip).
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4461
/* bnx2_reset_chip - soft-reset the chip in coordination with the bootcode.
 *
 * @bp:         device context
 * @reset_code: BNX2_DRV_MSG_CODE_* value describing why we are resetting;
 *              it is or'd into the WAIT0/WAIT1 firmware sync messages.
 *
 * Sequence: drain DMA/host-coalesce activity, sync WAIT0 with the
 * firmware, deposit a driver-reset signature in shared memory, issue
 * the chip-specific reset (5709 uses MISC_COMMAND, older chips the
 * PCICFG core-reset request), verify endian configuration, sync WAIT1,
 * re-read firmware capabilities, and re-apply per-chip fixups.
 *
 * Returns 0 on success, -EBUSY if the reset never completed, -ENODEV
 * on bad byte-swap configuration, or the firmware sync error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current
	 * transactions before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via MISC_COMMAND, then restore window/swap
		 * config through PCI config space. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default remote link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* The GRC window mapping was lost in the reset. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4567
/* bnx2_init_chip - bring the freshly-reset chip to an operational state.
 *
 * Programs, in order: DMA engine byte/word-swap and channel config,
 * PCI-X fixups, context memory, on-chip CPUs (firmware download via
 * bnx2_init_cpus), NVRAM, MAC address, management-queue window, page
 * sizes, backoff seed, MTU, rx buffer config, host-coalescing
 * trip/tick parameters (including per-MSI-X-vector status blocks),
 * and finally syncs WAIT2 with the bootcode and enables the chip.
 *
 * Returns 0 on success or a negative errno from context/CPU init or
 * the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* 5706 A0 workaround: restrict TDMA to one DMA. */
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		/* Clear the PCI-X relaxed-ordering enable bit. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* rx buffer config registers are programmed for at least 1500. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing parameters: interrupt values in the high
	 * half-word, non-interrupt values in the low half-word. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector status-block coalescing config for MSI-X vectors
	 * beyond the default one. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4788
c76c0475
MC
4789static void
4790bnx2_clear_ring_states(struct bnx2 *bp)
4791{
4792 struct bnx2_napi *bnapi;
35e9010b 4793 struct bnx2_tx_ring_info *txr;
bb4f98ab 4794 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
4795 int i;
4796
4797 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4798 bnapi = &bp->bnx2_napi[i];
35e9010b 4799 txr = &bnapi->tx_ring;
bb4f98ab 4800 rxr = &bnapi->rx_ring;
c76c0475 4801
35e9010b
MC
4802 txr->tx_cons = 0;
4803 txr->hw_tx_cons = 0;
bb4f98ab
MC
4804 rxr->rx_prod_bseq = 0;
4805 rxr->rx_prod = 0;
4806 rxr->rx_cons = 0;
4807 rxr->rx_pg_prod = 0;
4808 rxr->rx_pg_cons = 0;
c76c0475
MC
4809 }
4810}
4811
/* bnx2_init_tx_context - program the L2 tx context for one cid.
 *
 * Writes the context type, command type, and the high/low halves of
 * the tx descriptor ring's DMA base address into the chip's context
 * memory.  The 5709 (Xinan) uses a different set of context offsets
 * than the older chips.
 */
static void
bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
{
	u32 val, offset0, offset1, offset2, offset3;
	u32 cid_addr = GET_CID_ADDR(cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		offset0 = BNX2_L2CTX_TYPE_XI;
		offset1 = BNX2_L2CTX_CMD_TYPE_XI;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
	} else {
		offset0 = BNX2_L2CTX_TYPE;
		offset1 = BNX2_L2CTX_CMD_TYPE;
		offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
		offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
	}
	val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
	bnx2_ctx_wr(bp, cid_addr, offset0, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
	bnx2_ctx_wr(bp, cid_addr, offset1, val);

	/* tx bd chain base address, split into high/low dwords. */
	val = (u64) txr->tx_desc_mapping >> 32;
	bnx2_ctx_wr(bp, cid_addr, offset2, val);

	val = (u64) txr->tx_desc_mapping & 0xffffffff;
	bnx2_ctx_wr(bp, cid_addr, offset3, val);
}
b6016b76
MC
4841
/* bnx2_init_tx_ring - initialize software state and chip context for
 * tx ring @ring_num.
 *
 * Ring 0 uses TX_CID; additional TSS rings use TX_TSS_CID onward.
 * The last bd in the ring is written as a chain pointer back to the
 * start of the ring, making the bd chain circular.  Also sets the
 * queue-wake threshold to half the ring size and caches the mailbox
 * addresses used by the hot path to publish new producer values.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
{
	struct tx_bd *txbd;
	u32 cid = TX_CID;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;

	bnapi = &bp->bnx2_napi[ring_num];
	txr = &bnapi->tx_ring;

	if (ring_num == 0)
		cid = TX_CID;
	else
		cid = TX_TSS_CID + ring_num - 1;

	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Final bd doubles as the chain pointer back to bd 0. */
	txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;

	txr->tx_prod = 0;
	txr->tx_prod_bseq = 0;

	/* Mailbox addresses for producer index / byte sequence updates. */
	txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
	txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;

	bnx2_init_tx_context(bp, cid, txr);
}
4873
4874static void
5d5d0015
MC
4875bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4876 int num_rings)
b6016b76 4877{
b6016b76 4878 int i;
5d5d0015 4879 struct rx_bd *rxbd;
6aa20a22 4880
5d5d0015 4881 for (i = 0; i < num_rings; i++) {
13daffa2 4882 int j;
b6016b76 4883
5d5d0015 4884 rxbd = &rx_ring[i][0];
13daffa2 4885 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4886 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4887 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4888 }
5d5d0015 4889 if (i == (num_rings - 1))
13daffa2
MC
4890 j = 0;
4891 else
4892 j = i + 1;
5d5d0015
MC
4893 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4894 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4895 }
5d5d0015
MC
4896}
4897
/* bnx2_init_rx_ring - initialize rx ring @ring_num and its chip context.
 *
 * Ring 0 uses RX_CID; additional RSS rings use RX_RSS_CID onward.
 * Sets up the normal rx bd chain and, when a page ring is configured
 * (jumbo MTU), the rx page bd chain; programs the context with the
 * chain base addresses; pre-fills both rings with receive buffers;
 * and finally publishes the initial producer indices to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	/* Clear the page-buffer size first; overwritten below if a page
	 * ring is in use. */
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Base address of the first rx bd page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; stop early on allocation failure. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the normal rx ring with skbs. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used by the hot path. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer values to the chip. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
4977
/* bnx2_init_all_rings - initialize every tx and rx ring and, when more
 * than one rx ring is active, the RSS indirection table.
 *
 * The RSS table is built four one-byte entries at a time in tbl_32 and
 * written out (big-endian) on every fourth index; each entry steers a
 * hash bucket to one of the non-default rx rings
 * (i % (num_rx_rings - 1)).
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS while the rings are (re)built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5022
5d5d0015 5023static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 5024{
5d5d0015 5025 u32 max, num_rings = 1;
13daffa2 5026
5d5d0015
MC
5027 while (ring_size > MAX_RX_DESC_CNT) {
5028 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
5029 num_rings++;
5030 }
5031 /* round to next power of 2 */
5d5d0015 5032 max = max_size;
13daffa2
MC
5033 while ((max & num_rings) == 0)
5034 max >>= 1;
5035
5036 if (num_rings != max)
5037 max <<= 1;
5038
5d5d0015
MC
5039 return max;
5040}
5041
/* bnx2_set_rx_ring_size - derive all rx buffer/ring sizing from the MTU.
 *
 * @size: requested number of rx descriptors.
 *
 * Computes the per-buffer size for the current MTU.  If a full frame
 * plus skb overhead would exceed one page (and the chip's jumbo path
 * is not broken), the page ("jumbo") ring is enabled: frame heads go
 * into small skbs sized at the copy threshold and the remainder into
 * page buffers.  Fills in rx_buf_use_size/rx_buf_size, the copy and
 * jumbo thresholds, and the ring/page-ring sizes and index masks.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint: aligned data area plus shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages needed per jumbo frame (the '- 40' presumably
		 * accounts for the header bytes kept in the head skb —
		 * TODO confirm). */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* Head skbs only need to hold the copy threshold. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5080
b6016b76
MC
/* bnx2_free_tx_skbs - release every skb still queued on the tx rings.
 *
 * Walks each tx ring, unmaps the DMA for any pending skb, and frees
 * it.  The loop index advances by nr_frags + 1 per skb because one
 * packet occupies one bd per fragment plus one for the head.
 * Rings whose buffer array was never allocated are skipped.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;

			if (skb == NULL) {
				j++;
				continue;
			}

			skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

			tx_buf->skb = NULL;

			/* One bd for the head plus one per fragment. */
			j += skb_shinfo(skb)->nr_frags + 1;
			dev_kfree_skb(skb);
		}
	}
}
5112
5113static void
5114bnx2_free_rx_skbs(struct bnx2 *bp)
5115{
5116 int i;
5117
bb4f98ab
MC
5118 for (i = 0; i < bp->num_rx_rings; i++) {
5119 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5120 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5121 int j;
b6016b76 5122
bb4f98ab
MC
5123 if (rxr->rx_buf_ring == NULL)
5124 return;
b6016b76 5125
bb4f98ab
MC
5126 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5127 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5128 struct sk_buff *skb = rx_buf->skb;
b6016b76 5129
bb4f98ab
MC
5130 if (skb == NULL)
5131 continue;
b6016b76 5132
bb4f98ab
MC
5133 pci_unmap_single(bp->pdev,
5134 pci_unmap_addr(rx_buf, mapping),
5135 bp->rx_buf_use_size,
5136 PCI_DMA_FROMDEVICE);
b6016b76 5137
bb4f98ab
MC
5138 rx_buf->skb = NULL;
5139
5140 dev_kfree_skb(skb);
5141 }
5142 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5143 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5144 }
5145}
5146
/* bnx2_free_skbs - release all tx and rx buffers held by the driver.
 * Used when resetting or shutting down the NIC, after the hardware
 * has been quiesced so it no longer owns the buffers. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5153
5154static int
5155bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5156{
5157 int rc;
5158
5159 rc = bnx2_reset_chip(bp, reset_code);
5160 bnx2_free_skbs(bp);
5161 if (rc)
5162 return rc;
5163
fba9fe91
MC
5164 if ((rc = bnx2_init_chip(bp)) != 0)
5165 return rc;
5166
35e9010b 5167 bnx2_init_all_rings(bp);
b6016b76
MC
5168 return 0;
5169}
5170
/* bnx2_init_nic - full NIC (re)initialization: chip reset plus PHY setup.
 *
 * @reset_phy: nonzero to also reset the PHY during bnx2_init_phy().
 *
 * After the chip/ring reset, the PHY is initialized and the link set
 * up under bp->phy_lock; a pending remote-PHY event is processed while
 * the lock is still held.  Returns 0 or the reset error.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5187
74bf4ba3
MC
5188static int
5189bnx2_shutdown_chip(struct bnx2 *bp)
5190{
5191 u32 reset_code;
5192
5193 if (bp->flags & BNX2_FLAG_NO_WOL)
5194 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5195 else if (bp->wol)
5196 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5197 else
5198 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5199
5200 return bnx2_reset_chip(bp, reset_code);
5201}
5202
b6016b76
MC
5203static int
5204bnx2_test_registers(struct bnx2 *bp)
5205{
5206 int ret;
5bae30c9 5207 int i, is_5709;
f71e1309 5208 static const struct {
b6016b76
MC
5209 u16 offset;
5210 u16 flags;
5bae30c9 5211#define BNX2_FL_NOT_5709 1
b6016b76
MC
5212 u32 rw_mask;
5213 u32 ro_mask;
5214 } reg_tbl[] = {
5215 { 0x006c, 0, 0x00000000, 0x0000003f },
5216 { 0x0090, 0, 0xffffffff, 0x00000000 },
5217 { 0x0094, 0, 0x00000000, 0x00000000 },
5218
5bae30c9
MC
5219 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5220 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5221 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5222 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5223 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5224 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5225 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5226 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5227 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5228
5229 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5230 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5231 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5232 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5233 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5234 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5235
5236 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5237 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5238 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
5239
5240 { 0x1000, 0, 0x00000000, 0x00000001 },
15b169cc 5241 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
b6016b76
MC
5242
5243 { 0x1408, 0, 0x01c00800, 0x00000000 },
5244 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5245 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 5246 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
5247 { 0x14b0, 0, 0x00000002, 0x00000001 },
5248 { 0x14b8, 0, 0x00000000, 0x00000000 },
5249 { 0x14c0, 0, 0x00000000, 0x00000009 },
5250 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5251 { 0x14cc, 0, 0x00000000, 0x00000001 },
5252 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
5253
5254 { 0x1800, 0, 0x00000000, 0x00000001 },
5255 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
5256
5257 { 0x2800, 0, 0x00000000, 0x00000001 },
5258 { 0x2804, 0, 0x00000000, 0x00003f01 },
5259 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5260 { 0x2810, 0, 0xffff0000, 0x00000000 },
5261 { 0x2814, 0, 0xffff0000, 0x00000000 },
5262 { 0x2818, 0, 0xffff0000, 0x00000000 },
5263 { 0x281c, 0, 0xffff0000, 0x00000000 },
5264 { 0x2834, 0, 0xffffffff, 0x00000000 },
5265 { 0x2840, 0, 0x00000000, 0xffffffff },
5266 { 0x2844, 0, 0x00000000, 0xffffffff },
5267 { 0x2848, 0, 0xffffffff, 0x00000000 },
5268 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5269
5270 { 0x2c00, 0, 0x00000000, 0x00000011 },
5271 { 0x2c04, 0, 0x00000000, 0x00030007 },
5272
b6016b76
MC
5273 { 0x3c00, 0, 0x00000000, 0x00000001 },
5274 { 0x3c04, 0, 0x00000000, 0x00070000 },
5275 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5276 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5277 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5278 { 0x3c14, 0, 0x00000000, 0xffffffff },
5279 { 0x3c18, 0, 0x00000000, 0xffffffff },
5280 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5281 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
5282
5283 { 0x5004, 0, 0x00000000, 0x0000007f },
5284 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 5285
b6016b76
MC
5286 { 0x5c00, 0, 0x00000000, 0x00000001 },
5287 { 0x5c04, 0, 0x00000000, 0x0003000f },
5288 { 0x5c08, 0, 0x00000003, 0x00000000 },
5289 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5290 { 0x5c10, 0, 0x00000000, 0xffffffff },
5291 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5292 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5293 { 0x5c88, 0, 0x00000000, 0x00077373 },
5294 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5295
5296 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5297 { 0x680c, 0, 0xffffffff, 0x00000000 },
5298 { 0x6810, 0, 0xffffffff, 0x00000000 },
5299 { 0x6814, 0, 0xffffffff, 0x00000000 },
5300 { 0x6818, 0, 0xffffffff, 0x00000000 },
5301 { 0x681c, 0, 0xffffffff, 0x00000000 },
5302 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5303 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5304 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5305 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5306 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5307 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5308 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5309 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5310 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5311 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5312 { 0x684c, 0, 0xffffffff, 0x00000000 },
5313 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5314 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5315 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5316 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5317 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5318 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5319
5320 { 0xffff, 0, 0x00000000, 0x00000000 },
5321 };
5322
5323 ret = 0;
5bae30c9
MC
5324 is_5709 = 0;
5325 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5326 is_5709 = 1;
5327
b6016b76
MC
5328 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5329 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
5330 u16 flags = reg_tbl[i].flags;
5331
5332 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5333 continue;
b6016b76
MC
5334
5335 offset = (u32) reg_tbl[i].offset;
5336 rw_mask = reg_tbl[i].rw_mask;
5337 ro_mask = reg_tbl[i].ro_mask;
5338
14ab9b86 5339 save_val = readl(bp->regview + offset);
b6016b76 5340
14ab9b86 5341 writel(0, bp->regview + offset);
b6016b76 5342
14ab9b86 5343 val = readl(bp->regview + offset);
b6016b76
MC
5344 if ((val & rw_mask) != 0) {
5345 goto reg_test_err;
5346 }
5347
5348 if ((val & ro_mask) != (save_val & ro_mask)) {
5349 goto reg_test_err;
5350 }
5351
14ab9b86 5352 writel(0xffffffff, bp->regview + offset);
b6016b76 5353
14ab9b86 5354 val = readl(bp->regview + offset);
b6016b76
MC
5355 if ((val & rw_mask) != rw_mask) {
5356 goto reg_test_err;
5357 }
5358
5359 if ((val & ro_mask) != (save_val & ro_mask)) {
5360 goto reg_test_err;
5361 }
5362
14ab9b86 5363 writel(save_val, bp->regview + offset);
b6016b76
MC
5364 continue;
5365
5366reg_test_err:
14ab9b86 5367 writel(save_val, bp->regview + offset);
b6016b76
MC
5368 ret = -ENODEV;
5369 break;
5370 }
5371 return ret;
5372}
5373
5374static int
5375bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5376{
f71e1309 5377 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5378 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5379 int i;
5380
5381 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5382 u32 offset;
5383
5384 for (offset = 0; offset < size; offset += 4) {
5385
2726d6e1 5386 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5387
2726d6e1 5388 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5389 test_pattern[i]) {
5390 return -ENODEV;
5391 }
5392 }
5393 }
5394 return 0;
5395}
5396
5397static int
5398bnx2_test_memory(struct bnx2 *bp)
5399{
5400 int ret = 0;
5401 int i;
5bae30c9 5402 static struct mem_entry {
b6016b76
MC
5403 u32 offset;
5404 u32 len;
5bae30c9 5405 } mem_tbl_5706[] = {
b6016b76 5406 { 0x60000, 0x4000 },
5b0c76ad 5407 { 0xa0000, 0x3000 },
b6016b76
MC
5408 { 0xe0000, 0x4000 },
5409 { 0x120000, 0x4000 },
5410 { 0x1a0000, 0x4000 },
5411 { 0x160000, 0x4000 },
5412 { 0xffffffff, 0 },
5bae30c9
MC
5413 },
5414 mem_tbl_5709[] = {
5415 { 0x60000, 0x4000 },
5416 { 0xa0000, 0x3000 },
5417 { 0xe0000, 0x4000 },
5418 { 0x120000, 0x4000 },
5419 { 0x1a0000, 0x4000 },
5420 { 0xffffffff, 0 },
b6016b76 5421 };
5bae30c9
MC
5422 struct mem_entry *mem_tbl;
5423
5424 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5425 mem_tbl = mem_tbl_5709;
5426 else
5427 mem_tbl = mem_tbl_5706;
b6016b76
MC
5428
5429 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5430 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5431 mem_tbl[i].len)) != 0) {
5432 return ret;
5433 }
5434 }
6aa20a22 5435
b6016b76
MC
5436 return ret;
5437}
5438
bc5a0690
MC
5439#define BNX2_MAC_LOOPBACK 0
5440#define BNX2_PHY_LOOPBACK 1
5441
b6016b76 5442static int
bc5a0690 5443bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
5444{
5445 unsigned int pkt_size, num_pkts, i;
5446 struct sk_buff *skb, *rx_skb;
5447 unsigned char *packet;
bc5a0690 5448 u16 rx_start_idx, rx_idx;
b6016b76
MC
5449 dma_addr_t map;
5450 struct tx_bd *txbd;
5451 struct sw_bd *rx_buf;
5452 struct l2_fhdr *rx_hdr;
5453 int ret = -ENODEV;
c76c0475 5454 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
35e9010b 5455 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 5456 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
c76c0475
MC
5457
5458 tx_napi = bnapi;
b6016b76 5459
35e9010b 5460 txr = &tx_napi->tx_ring;
bb4f98ab 5461 rxr = &bnapi->rx_ring;
bc5a0690
MC
5462 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5463 bp->loopback = MAC_LOOPBACK;
5464 bnx2_set_mac_loopback(bp);
5465 }
5466 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
583c28e5 5467 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
489310a4
MC
5468 return 0;
5469
80be4434 5470 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
5471 bnx2_set_phy_loopback(bp);
5472 }
5473 else
5474 return -EINVAL;
b6016b76 5475
84eaa187 5476 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 5477 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
5478 if (!skb)
5479 return -ENOMEM;
b6016b76 5480 packet = skb_put(skb, pkt_size);
6634292b 5481 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
5482 memset(packet + 6, 0x0, 8);
5483 for (i = 14; i < pkt_size; i++)
5484 packet[i] = (unsigned char) (i & 0xff);
5485
3d16af86
BL
5486 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5487 dev_kfree_skb(skb);
5488 return -EIO;
5489 }
042a53a9 5490 map = skb_shinfo(skb)->dma_head;
b6016b76 5491
bf5295bb
MC
5492 REG_WR(bp, BNX2_HC_COMMAND,
5493 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5494
b6016b76
MC
5495 REG_RD(bp, BNX2_HC_COMMAND);
5496
5497 udelay(5);
35efa7c1 5498 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 5499
b6016b76
MC
5500 num_pkts = 0;
5501
35e9010b 5502 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
b6016b76
MC
5503
5504 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5505 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5506 txbd->tx_bd_mss_nbytes = pkt_size;
5507 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5508
5509 num_pkts++;
35e9010b
MC
5510 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5511 txr->tx_prod_bseq += pkt_size;
b6016b76 5512
35e9010b
MC
5513 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5514 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
5515
5516 udelay(100);
5517
bf5295bb
MC
5518 REG_WR(bp, BNX2_HC_COMMAND,
5519 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5520
b6016b76
MC
5521 REG_RD(bp, BNX2_HC_COMMAND);
5522
5523 udelay(5);
5524
3d16af86 5525 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
745720e5 5526 dev_kfree_skb(skb);
b6016b76 5527
35e9010b 5528 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
b6016b76 5529 goto loopback_test_done;
b6016b76 5530
35efa7c1 5531 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
5532 if (rx_idx != rx_start_idx + num_pkts) {
5533 goto loopback_test_done;
5534 }
5535
bb4f98ab 5536 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
b6016b76
MC
5537 rx_skb = rx_buf->skb;
5538
5539 rx_hdr = (struct l2_fhdr *) rx_skb->data;
d89cb6af 5540 skb_reserve(rx_skb, BNX2_RX_OFFSET);
b6016b76
MC
5541
5542 pci_dma_sync_single_for_cpu(bp->pdev,
5543 pci_unmap_addr(rx_buf, mapping),
5544 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5545
ade2bfe7 5546 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5547 (L2_FHDR_ERRORS_BAD_CRC |
5548 L2_FHDR_ERRORS_PHY_DECODE |
5549 L2_FHDR_ERRORS_ALIGNMENT |
5550 L2_FHDR_ERRORS_TOO_SHORT |
5551 L2_FHDR_ERRORS_GIANT_FRAME)) {
5552
5553 goto loopback_test_done;
5554 }
5555
5556 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5557 goto loopback_test_done;
5558 }
5559
5560 for (i = 14; i < pkt_size; i++) {
5561 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5562 goto loopback_test_done;
5563 }
5564 }
5565
5566 ret = 0;
5567
5568loopback_test_done:
5569 bp->loopback = 0;
5570 return ret;
5571}
5572
bc5a0690
MC
5573#define BNX2_MAC_LOOPBACK_FAILED 1
5574#define BNX2_PHY_LOOPBACK_FAILED 2
5575#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5576 BNX2_PHY_LOOPBACK_FAILED)
5577
5578static int
5579bnx2_test_loopback(struct bnx2 *bp)
5580{
5581 int rc = 0;
5582
5583 if (!netif_running(bp->dev))
5584 return BNX2_LOOPBACK_FAILED;
5585
5586 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5587 spin_lock_bh(&bp->phy_lock);
9a120bc5 5588 bnx2_init_phy(bp, 1);
bc5a0690
MC
5589 spin_unlock_bh(&bp->phy_lock);
5590 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5591 rc |= BNX2_MAC_LOOPBACK_FAILED;
5592 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5593 rc |= BNX2_PHY_LOOPBACK_FAILED;
5594 return rc;
5595}
5596
b6016b76
MC
5597#define NVRAM_SIZE 0x200
5598#define CRC32_RESIDUAL 0xdebb20e3
5599
5600static int
5601bnx2_test_nvram(struct bnx2 *bp)
5602{
b491edd5 5603 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5604 u8 *data = (u8 *) buf;
5605 int rc = 0;
5606 u32 magic, csum;
5607
5608 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5609 goto test_nvram_done;
5610
5611 magic = be32_to_cpu(buf[0]);
5612 if (magic != 0x669955aa) {
5613 rc = -ENODEV;
5614 goto test_nvram_done;
5615 }
5616
5617 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5618 goto test_nvram_done;
5619
5620 csum = ether_crc_le(0x100, data);
5621 if (csum != CRC32_RESIDUAL) {
5622 rc = -ENODEV;
5623 goto test_nvram_done;
5624 }
5625
5626 csum = ether_crc_le(0x100, data + 0x100);
5627 if (csum != CRC32_RESIDUAL) {
5628 rc = -ENODEV;
5629 }
5630
5631test_nvram_done:
5632 return rc;
5633}
5634
5635static int
5636bnx2_test_link(struct bnx2 *bp)
5637{
5638 u32 bmsr;
5639
9f52b564
MC
5640 if (!netif_running(bp->dev))
5641 return -ENODEV;
5642
583c28e5 5643 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5644 if (bp->link_up)
5645 return 0;
5646 return -ENODEV;
5647 }
c770a65c 5648 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5649 bnx2_enable_bmsr1(bp);
5650 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5651 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5652 bnx2_disable_bmsr1(bp);
c770a65c 5653 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5654
b6016b76
MC
5655 if (bmsr & BMSR_LSTATUS) {
5656 return 0;
5657 }
5658 return -ENODEV;
5659}
5660
5661static int
5662bnx2_test_intr(struct bnx2 *bp)
5663{
5664 int i;
b6016b76
MC
5665 u16 status_idx;
5666
5667 if (!netif_running(bp->dev))
5668 return -ENODEV;
5669
5670 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5671
5672 /* This register is not touched during run-time. */
bf5295bb 5673 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5674 REG_RD(bp, BNX2_HC_COMMAND);
5675
5676 for (i = 0; i < 10; i++) {
5677 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5678 status_idx) {
5679
5680 break;
5681 }
5682
5683 msleep_interruptible(10);
5684 }
5685 if (i < 10)
5686 return 0;
5687
5688 return -ENODEV;
5689}
5690
38ea3686 5691/* Determining link for parallel detection. */
b2fadeae
MC
5692static int
5693bnx2_5706_serdes_has_link(struct bnx2 *bp)
5694{
5695 u32 mode_ctl, an_dbg, exp;
5696
38ea3686
MC
5697 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5698 return 0;
5699
b2fadeae
MC
5700 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5701 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5702
5703 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5704 return 0;
5705
5706 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5707 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5708 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5709
f3014c0c 5710 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
b2fadeae
MC
5711 return 0;
5712
5713 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5714 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5715 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5716
5717 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5718 return 0;
5719
5720 return 1;
5721}
5722
b6016b76 5723static void
48b01e2d 5724bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5725{
b2fadeae
MC
5726 int check_link = 1;
5727
48b01e2d 5728 spin_lock(&bp->phy_lock);
b2fadeae 5729 if (bp->serdes_an_pending) {
48b01e2d 5730 bp->serdes_an_pending--;
b2fadeae
MC
5731 check_link = 0;
5732 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
48b01e2d 5733 u32 bmcr;
b6016b76 5734
ac392abc 5735 bp->current_interval = BNX2_TIMER_INTERVAL;
cd339a0e 5736
ca58c3af 5737 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5738
48b01e2d 5739 if (bmcr & BMCR_ANENABLE) {
b2fadeae 5740 if (bnx2_5706_serdes_has_link(bp)) {
48b01e2d
MC
5741 bmcr &= ~BMCR_ANENABLE;
5742 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5743 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
583c28e5 5744 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d 5745 }
b6016b76 5746 }
48b01e2d
MC
5747 }
5748 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
583c28e5 5749 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
48b01e2d 5750 u32 phy2;
b6016b76 5751
48b01e2d
MC
5752 bnx2_write_phy(bp, 0x17, 0x0f01);
5753 bnx2_read_phy(bp, 0x15, &phy2);
5754 if (phy2 & 0x20) {
5755 u32 bmcr;
cd339a0e 5756
ca58c3af 5757 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5758 bmcr |= BMCR_ANENABLE;
ca58c3af 5759 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5760
583c28e5 5761 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d
MC
5762 }
5763 } else
ac392abc 5764 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5765
a2724e25 5766 if (check_link) {
b2fadeae
MC
5767 u32 val;
5768
5769 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5770 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5771 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5772
a2724e25
MC
5773 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5774 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5775 bnx2_5706s_force_link_dn(bp, 1);
5776 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5777 } else
5778 bnx2_set_link(bp);
5779 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5780 bnx2_set_link(bp);
b2fadeae 5781 }
48b01e2d
MC
5782 spin_unlock(&bp->phy_lock);
5783}
b6016b76 5784
f8dd064e
MC
5785static void
5786bnx2_5708_serdes_timer(struct bnx2 *bp)
5787{
583c28e5 5788 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
5789 return;
5790
583c28e5 5791 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
f8dd064e
MC
5792 bp->serdes_an_pending = 0;
5793 return;
5794 }
b6016b76 5795
f8dd064e
MC
5796 spin_lock(&bp->phy_lock);
5797 if (bp->serdes_an_pending)
5798 bp->serdes_an_pending--;
5799 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5800 u32 bmcr;
b6016b76 5801
ca58c3af 5802 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 5803 if (bmcr & BMCR_ANENABLE) {
605a9e20 5804 bnx2_enable_forced_2g5(bp);
40105c0b 5805 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
f8dd064e 5806 } else {
605a9e20 5807 bnx2_disable_forced_2g5(bp);
f8dd064e 5808 bp->serdes_an_pending = 2;
ac392abc 5809 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5810 }
b6016b76 5811
f8dd064e 5812 } else
ac392abc 5813 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5814
f8dd064e
MC
5815 spin_unlock(&bp->phy_lock);
5816}
5817
48b01e2d
MC
5818static void
5819bnx2_timer(unsigned long data)
5820{
5821 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 5822
48b01e2d
MC
5823 if (!netif_running(bp->dev))
5824 return;
b6016b76 5825
48b01e2d
MC
5826 if (atomic_read(&bp->intr_sem) != 0)
5827 goto bnx2_restart_timer;
b6016b76 5828
efba0180
MC
5829 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5830 BNX2_FLAG_USING_MSI)
5831 bnx2_chk_missed_msi(bp);
5832
df149d70 5833 bnx2_send_heart_beat(bp);
b6016b76 5834
2726d6e1
MC
5835 bp->stats_blk->stat_FwRxDrop =
5836 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 5837
02537b06
MC
5838 /* workaround occasional corrupted counters */
5839 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5840 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5841 BNX2_HC_COMMAND_STATS_NOW);
5842
583c28e5 5843 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
f8dd064e
MC
5844 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5845 bnx2_5706_serdes_timer(bp);
27a005b8 5846 else
f8dd064e 5847 bnx2_5708_serdes_timer(bp);
b6016b76
MC
5848 }
5849
5850bnx2_restart_timer:
cd339a0e 5851 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5852}
5853
8e6a72c4
MC
5854static int
5855bnx2_request_irq(struct bnx2 *bp)
5856{
6d866ffc 5857 unsigned long flags;
b4b36042
MC
5858 struct bnx2_irq *irq;
5859 int rc = 0, i;
8e6a72c4 5860
f86e82fb 5861 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
5862 flags = 0;
5863 else
5864 flags = IRQF_SHARED;
b4b36042
MC
5865
5866 for (i = 0; i < bp->irq_nvecs; i++) {
5867 irq = &bp->irq_tbl[i];
c76c0475 5868 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 5869 &bp->bnx2_napi[i]);
b4b36042
MC
5870 if (rc)
5871 break;
5872 irq->requested = 1;
5873 }
8e6a72c4
MC
5874 return rc;
5875}
5876
5877static void
5878bnx2_free_irq(struct bnx2 *bp)
5879{
b4b36042
MC
5880 struct bnx2_irq *irq;
5881 int i;
8e6a72c4 5882
b4b36042
MC
5883 for (i = 0; i < bp->irq_nvecs; i++) {
5884 irq = &bp->irq_tbl[i];
5885 if (irq->requested)
f0ea2e63 5886 free_irq(irq->vector, &bp->bnx2_napi[i]);
b4b36042 5887 irq->requested = 0;
6d866ffc 5888 }
f86e82fb 5889 if (bp->flags & BNX2_FLAG_USING_MSI)
b4b36042 5890 pci_disable_msi(bp->pdev);
f86e82fb 5891 else if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
5892 pci_disable_msix(bp->pdev);
5893
f86e82fb 5894 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
b4b36042
MC
5895}
5896
5897static void
5e9ad9e1 5898bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
b4b36042 5899{
57851d84
MC
5900 int i, rc;
5901 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
4e1d0de9
MC
5902 struct net_device *dev = bp->dev;
5903 const int len = sizeof(bp->irq_tbl[0].name);
57851d84 5904
b4b36042
MC
5905 bnx2_setup_msix_tbl(bp);
5906 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5907 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5908 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
57851d84
MC
5909
5910 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5911 msix_ent[i].entry = i;
5912 msix_ent[i].vector = 0;
5913 }
5914
5915 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5916 if (rc != 0)
5917 return;
5918
5e9ad9e1 5919 bp->irq_nvecs = msix_vecs;
f86e82fb 5920 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
69010313 5921 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
57851d84 5922 bp->irq_tbl[i].vector = msix_ent[i].vector;
69010313
MC
5923 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
5924 bp->irq_tbl[i].handler = bnx2_msi_1shot;
5925 }
6d866ffc
MC
5926}
5927
5928static void
5929bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5930{
5e9ad9e1 5931 int cpus = num_online_cpus();
706bf240 5932 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5e9ad9e1 5933
6d866ffc
MC
5934 bp->irq_tbl[0].handler = bnx2_interrupt;
5935 strcpy(bp->irq_tbl[0].name, bp->dev->name);
b4b36042
MC
5936 bp->irq_nvecs = 1;
5937 bp->irq_tbl[0].vector = bp->pdev->irq;
5938
5e9ad9e1
MC
5939 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5940 bnx2_enable_msix(bp, msix_vecs);
6d866ffc 5941
f86e82fb
DM
5942 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5943 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6d866ffc 5944 if (pci_enable_msi(bp->pdev) == 0) {
f86e82fb 5945 bp->flags |= BNX2_FLAG_USING_MSI;
6d866ffc 5946 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
f86e82fb 5947 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6d866ffc
MC
5948 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5949 } else
5950 bp->irq_tbl[0].handler = bnx2_msi;
b4b36042
MC
5951
5952 bp->irq_tbl[0].vector = bp->pdev->irq;
6d866ffc
MC
5953 }
5954 }
706bf240
BL
5955
5956 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5957 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5958
5e9ad9e1 5959 bp->num_rx_rings = bp->irq_nvecs;
8e6a72c4
MC
5960}
5961
b6016b76
MC
5962/* Called with rtnl_lock */
5963static int
5964bnx2_open(struct net_device *dev)
5965{
972ec0d4 5966 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5967 int rc;
5968
1b2f922f
MC
5969 netif_carrier_off(dev);
5970
829ca9a3 5971 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5972 bnx2_disable_int(bp);
5973
35e9010b
MC
5974 bnx2_setup_int_mode(bp, disable_msi);
5975 bnx2_napi_enable(bp);
b6016b76 5976 rc = bnx2_alloc_mem(bp);
2739a8bb
MC
5977 if (rc)
5978 goto open_err;
b6016b76 5979
8e6a72c4 5980 rc = bnx2_request_irq(bp);
2739a8bb
MC
5981 if (rc)
5982 goto open_err;
b6016b76 5983
9a120bc5 5984 rc = bnx2_init_nic(bp, 1);
2739a8bb
MC
5985 if (rc)
5986 goto open_err;
6aa20a22 5987
cd339a0e 5988 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5989
5990 atomic_set(&bp->intr_sem, 0);
5991
5992 bnx2_enable_int(bp);
5993
f86e82fb 5994 if (bp->flags & BNX2_FLAG_USING_MSI) {
b6016b76
MC
5995 /* Test MSI to make sure it is working
5996 * If MSI test fails, go back to INTx mode
5997 */
5998 if (bnx2_test_intr(bp) != 0) {
5999 printk(KERN_WARNING PFX "%s: No interrupt was generated"
6000 " using MSI, switching to INTx mode. Please"
6001 " report this failure to the PCI maintainer"
6002 " and include system chipset information.\n",
6003 bp->dev->name);
6004
6005 bnx2_disable_int(bp);
8e6a72c4 6006 bnx2_free_irq(bp);
b6016b76 6007
6d866ffc
MC
6008 bnx2_setup_int_mode(bp, 1);
6009
9a120bc5 6010 rc = bnx2_init_nic(bp, 0);
b6016b76 6011
8e6a72c4
MC
6012 if (!rc)
6013 rc = bnx2_request_irq(bp);
6014
b6016b76 6015 if (rc) {
b6016b76 6016 del_timer_sync(&bp->timer);
2739a8bb 6017 goto open_err;
b6016b76
MC
6018 }
6019 bnx2_enable_int(bp);
6020 }
6021 }
f86e82fb 6022 if (bp->flags & BNX2_FLAG_USING_MSI)
b6016b76 6023 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
f86e82fb 6024 else if (bp->flags & BNX2_FLAG_USING_MSIX)
57851d84 6025 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
b6016b76 6026
706bf240 6027 netif_tx_start_all_queues(dev);
b6016b76
MC
6028
6029 return 0;
2739a8bb
MC
6030
6031open_err:
6032 bnx2_napi_disable(bp);
6033 bnx2_free_skbs(bp);
6034 bnx2_free_irq(bp);
6035 bnx2_free_mem(bp);
6036 return rc;
b6016b76
MC
6037}
6038
6039static void
c4028958 6040bnx2_reset_task(struct work_struct *work)
b6016b76 6041{
c4028958 6042 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 6043
afdc08b9
MC
6044 if (!netif_running(bp->dev))
6045 return;
6046
b6016b76
MC
6047 bnx2_netif_stop(bp);
6048
9a120bc5 6049 bnx2_init_nic(bp, 1);
b6016b76
MC
6050
6051 atomic_set(&bp->intr_sem, 1);
6052 bnx2_netif_start(bp);
6053}
6054
6055static void
6056bnx2_tx_timeout(struct net_device *dev)
6057{
972ec0d4 6058 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6059
6060 /* This allows the netif to be shutdown gracefully before resetting */
6061 schedule_work(&bp->reset_task);
6062}
6063
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Record the VLAN group, reprogram the RX filter accordingly, and tell
 * the firmware to keep (or stop keeping) VLAN tags when supported.
 * The netif is stopped around the update.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6081
932ff279 6082/* Called with netif_tx_lock.
2f8af120
MC
6083 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6084 * netif_wake_queue().
b6016b76
MC
6085 */
6086static int
6087bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6088{
972ec0d4 6089 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6090 dma_addr_t mapping;
6091 struct tx_bd *txbd;
3d16af86 6092 struct sw_tx_bd *tx_buf;
b6016b76
MC
6093 u32 len, vlan_tag_flags, last_frag, mss;
6094 u16 prod, ring_prod;
6095 int i;
706bf240
BL
6096 struct bnx2_napi *bnapi;
6097 struct bnx2_tx_ring_info *txr;
6098 struct netdev_queue *txq;
3d16af86 6099 struct skb_shared_info *sp;
706bf240
BL
6100
6101 /* Determine which tx ring we will be placed on */
6102 i = skb_get_queue_mapping(skb);
6103 bnapi = &bp->bnx2_napi[i];
6104 txr = &bnapi->tx_ring;
6105 txq = netdev_get_tx_queue(dev, i);
b6016b76 6106
35e9010b 6107 if (unlikely(bnx2_tx_avail(bp, txr) <
a550c99b 6108 (skb_shinfo(skb)->nr_frags + 1))) {
706bf240 6109 netif_tx_stop_queue(txq);
b6016b76
MC
6110 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6111 dev->name);
6112
6113 return NETDEV_TX_BUSY;
6114 }
6115 len = skb_headlen(skb);
35e9010b 6116 prod = txr->tx_prod;
b6016b76
MC
6117 ring_prod = TX_RING_IDX(prod);
6118
6119 vlan_tag_flags = 0;
84fa7933 6120 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
6121 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6122 }
6123
729b85cd 6124#ifdef BCM_VLAN
79ea13ce 6125 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
b6016b76
MC
6126 vlan_tag_flags |=
6127 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6128 }
729b85cd 6129#endif
fde82055 6130 if ((mss = skb_shinfo(skb)->gso_size)) {
a1efb4b6 6131 u32 tcp_opt_len;
eddc9ec5 6132 struct iphdr *iph;
b6016b76 6133
b6016b76
MC
6134 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6135
4666f87a
MC
6136 tcp_opt_len = tcp_optlen(skb);
6137
6138 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6139 u32 tcp_off = skb_transport_offset(skb) -
6140 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 6141
4666f87a
MC
6142 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6143 TX_BD_FLAGS_SW_FLAGS;
6144 if (likely(tcp_off == 0))
6145 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6146 else {
6147 tcp_off >>= 3;
6148 vlan_tag_flags |= ((tcp_off & 0x3) <<
6149 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6150 ((tcp_off & 0x10) <<
6151 TX_BD_FLAGS_TCP6_OFF4_SHL);
6152 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6153 }
6154 } else {
4666f87a 6155 iph = ip_hdr(skb);
4666f87a
MC
6156 if (tcp_opt_len || (iph->ihl > 5)) {
6157 vlan_tag_flags |= ((iph->ihl - 5) +
6158 (tcp_opt_len >> 2)) << 8;
6159 }
b6016b76 6160 }
4666f87a 6161 } else
b6016b76 6162 mss = 0;
b6016b76 6163
3d16af86
BL
6164 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6165 dev_kfree_skb(skb);
6166 return NETDEV_TX_OK;
6167 }
6168
6169 sp = skb_shinfo(skb);
042a53a9 6170 mapping = sp->dma_head;
6aa20a22 6171
35e9010b 6172 tx_buf = &txr->tx_buf_ring[ring_prod];
b6016b76 6173 tx_buf->skb = skb;
b6016b76 6174
35e9010b 6175 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6176
6177 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6178 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6179 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6180 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6181
6182 last_frag = skb_shinfo(skb)->nr_frags;
d62fda08
ED
6183 tx_buf->nr_frags = last_frag;
6184 tx_buf->is_gso = skb_is_gso(skb);
b6016b76
MC
6185
6186 for (i = 0; i < last_frag; i++) {
6187 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6188
6189 prod = NEXT_TX_BD(prod);
6190 ring_prod = TX_RING_IDX(prod);
35e9010b 6191 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6192
6193 len = frag->size;
042a53a9 6194 mapping = sp->dma_maps[i];
b6016b76
MC
6195
6196 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6197 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6198 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6199 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6200
6201 }
6202 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6203
6204 prod = NEXT_TX_BD(prod);
35e9010b 6205 txr->tx_prod_bseq += skb->len;
b6016b76 6206
35e9010b
MC
6207 REG_WR16(bp, txr->tx_bidx_addr, prod);
6208 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
6209
6210 mmiowb();
6211
35e9010b 6212 txr->tx_prod = prod;
b6016b76 6213
35e9010b 6214 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
706bf240 6215 netif_tx_stop_queue(txq);
35e9010b 6216 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
706bf240 6217 netif_tx_wake_queue(txq);
b6016b76
MC
6218 }
6219
6220 return NETDEV_TX_OK;
6221}
6222
/* ndo_stop handler.  Called with rtnl_lock held.  Quiesces and powers
 * down the device: cancel any queued reset work, mask interrupts, stop
 * NAPI and the periodic timer, shut the chip down, then release the
 * IRQ and all rx/tx memory before dropping to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure a pending reset_task cannot run during/after teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6243
/* Assemble a hardware counter from its _hi/_lo halves.  On 64-bit
 * hosts the full 64-bit value is returned; 32-bit hosts can only
 * report the low 32 bits.  Each expansion is fully parenthesized so
 * the macros compose safely inside larger expressions.
 */
#define GET_NET_STATS64(ctr)					\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_NET_STATS32(ctr)		\
	((ctr##_lo))

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
6256
/* ndo_get_stats handler.  Translates the chip's DMA'ed statistics
 * block into struct net_device_stats.  64-bit counters are truncated
 * to unsigned long via GET_NET_STATS (low 32 bits on 32-bit hosts).
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &dev->stats;

	/* The stats block may not be allocated yet (device never opened). */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier sense errors are not reported on 5706 and 5708 A0
	 * (counter skipped per errata — see the stats length tables).
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
6332
6333/* All ethtool functions called with rtnl_lock */
6334
/* ethtool get_settings handler.  Supported modes are derived from the
 * PHY/media type; current link parameters are sampled under phy_lock
 * so port/advertising/speed/duplex are mutually consistent.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* A remote-PHY capable device can run either media type. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	/* Speed/duplex are only meaningful while the link is up. */
	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 6393
b6016b76
MC
/* ethtool set_settings handler.  Validates the requested port,
 * autoneg/advertising mask or forced speed/duplex against the PHY's
 * capabilities, then commits the request and reprograms the PHY if
 * the device is running.  Returns -EINVAL for any invalid combination.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Only remote-PHY capable devices may switch the active port. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
		    (cmd->advertising == ADVERTISED_10baseT_Full) ||
		    (cmd->advertising == ADVERTISED_100baseT_Half) ||
		    (cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 speeds are copper-only. */
			if (cmd->port == PORT_FIBRE)
				goto err_out_unlock;

			advertising = cmd->advertising;

		} else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
			/* 2.5G requires a capable serdes PHY. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
			    (cmd->port == PORT_TP))
				goto err_out_unlock;
		} else if (cmd->advertising == ADVERTISED_1000baseT_Full)
			advertising = cmd->advertising;
		else if (cmd->advertising == ADVERTISED_1000baseT_Half)
			goto err_out_unlock;
		else {
			/* No single-speed request: advertise everything
			 * the selected medium supports.
			 */
			if (cmd->port == PORT_FIBRE)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			else
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6488
6489static void
6490bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6491{
972ec0d4 6492 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6493
6494 strcpy(info->driver, DRV_MODULE_NAME);
6495 strcpy(info->version, DRV_MODULE_VERSION);
6496 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 6497 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
6498}
6499
244ac4f4
MC
/* Size of the register dump returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler: fixed-size dump. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6507
6508static void
6509bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6510{
6511 u32 *p = _p, i, offset;
6512 u8 *orig_p = _p;
6513 struct bnx2 *bp = netdev_priv(dev);
6514 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6515 0x0800, 0x0880, 0x0c00, 0x0c10,
6516 0x0c30, 0x0d08, 0x1000, 0x101c,
6517 0x1040, 0x1048, 0x1080, 0x10a4,
6518 0x1400, 0x1490, 0x1498, 0x14f0,
6519 0x1500, 0x155c, 0x1580, 0x15dc,
6520 0x1600, 0x1658, 0x1680, 0x16d8,
6521 0x1800, 0x1820, 0x1840, 0x1854,
6522 0x1880, 0x1894, 0x1900, 0x1984,
6523 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6524 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6525 0x2000, 0x2030, 0x23c0, 0x2400,
6526 0x2800, 0x2820, 0x2830, 0x2850,
6527 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6528 0x3c00, 0x3c94, 0x4000, 0x4010,
6529 0x4080, 0x4090, 0x43c0, 0x4458,
6530 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6531 0x4fc0, 0x5010, 0x53c0, 0x5444,
6532 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6533 0x5fc0, 0x6000, 0x6400, 0x6428,
6534 0x6800, 0x6848, 0x684c, 0x6860,
6535 0x6888, 0x6910, 0x8000 };
6536
6537 regs->version = 0;
6538
6539 memset(p, 0, BNX2_REGDUMP_LEN);
6540
6541 if (!netif_running(bp->dev))
6542 return;
6543
6544 i = 0;
6545 offset = reg_boundaries[0];
6546 p += offset;
6547 while (offset < BNX2_REGDUMP_LEN) {
6548 *p++ = REG_RD(bp, offset);
6549 offset += 4;
6550 if (offset == reg_boundaries[i + 1]) {
6551 offset = reg_boundaries[i + 2];
6552 p = (u32 *) (orig_p + offset);
6553 i += 2;
6554 }
6555 }
6556}
6557
b6016b76
MC
6558static void
6559bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6560{
972ec0d4 6561 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6562
f86e82fb 6563 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6564 wol->supported = 0;
6565 wol->wolopts = 0;
6566 }
6567 else {
6568 wol->supported = WAKE_MAGIC;
6569 if (bp->wol)
6570 wol->wolopts = WAKE_MAGIC;
6571 else
6572 wol->wolopts = 0;
6573 }
6574 memset(&wol->sopass, 0, sizeof(wol->sopass));
6575}
6576
6577static int
6578bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6579{
972ec0d4 6580 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6581
6582 if (wol->wolopts & ~WAKE_MAGIC)
6583 return -EINVAL;
6584
6585 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6586 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6587 return -EINVAL;
6588
6589 bp->wol = 1;
6590 }
6591 else {
6592 bp->wol = 0;
6593 }
6594 return 0;
6595}
6596
/* ethtool nway_reset handler: restart autonegotiation.  Requires the
 * device to be up (PHY powered) and autoneg enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices restart autoneg through the firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock across the sleep; reacquire afterwards. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout in the periodic timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6642
6643static int
6644bnx2_get_eeprom_len(struct net_device *dev)
6645{
972ec0d4 6646 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6647
1122db71 6648 if (bp->flash_info == NULL)
b6016b76
MC
6649 return 0;
6650
1122db71 6651 return (int) bp->flash_size;
b6016b76
MC
6652}
6653
6654static int
6655bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6656 u8 *eebuf)
6657{
972ec0d4 6658 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6659 int rc;
6660
9f52b564
MC
6661 if (!netif_running(dev))
6662 return -EAGAIN;
6663
1064e944 6664 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6665
6666 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6667
6668 return rc;
6669}
6670
6671static int
6672bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6673 u8 *eebuf)
6674{
972ec0d4 6675 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6676 int rc;
6677
9f52b564
MC
6678 if (!netif_running(dev))
6679 return -EAGAIN;
6680
1064e944 6681 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6682
6683 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6684
6685 return rc;
6686}
6687
/* ethtool get_coalesce handler: report the current host-coalescing
 * tick and frame-count settings cached in the driver.
 */
static int
bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
	coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
	coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;

	coal->tx_coalesce_usecs = bp->tx_ticks;
	coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
	coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
	coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;

	coal->stats_block_coalesce_usecs = bp->stats_ticks;

	return 0;
}
6709
/* ethtool set_coalesce handler.  Each value is clamped to the field
 * width accepted by the host-coalescing block (tick values to 0x3ff,
 * frame counts to 0xff); the NIC is then re-initialized if running so
 * the new values are programmed.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* The 5708 only supports a stats interval of 0 or 1 second. */
	if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart the NIC so the new parameters take effect. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp);
	}

	return 0;
}
6758
/* ethtool get_ringparam handler.  The "jumbo" ring reported here is
 * the rx page ring (rx_pg_ring_size); there is no mini ring.
 */
static void
bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = bp->rx_pg_ring_size;

	ering->tx_max_pending = MAX_TX_DESC_CNT;
	ering->tx_pending = bp->tx_ring_size;
}
6775
6776static int
5d5d0015 6777bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
b6016b76 6778{
13daffa2
MC
6779 if (netif_running(bp->dev)) {
6780 bnx2_netif_stop(bp);
6781 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6782 bnx2_free_skbs(bp);
6783 bnx2_free_mem(bp);
6784 }
6785
5d5d0015
MC
6786 bnx2_set_rx_ring_size(bp, rx);
6787 bp->tx_ring_size = tx;
b6016b76
MC
6788
6789 if (netif_running(bp->dev)) {
13daffa2
MC
6790 int rc;
6791
6792 rc = bnx2_alloc_mem(bp);
6793 if (rc)
6794 return rc;
9a120bc5 6795 bnx2_init_nic(bp, 0);
b6016b76
MC
6796 bnx2_netif_start(bp);
6797 }
b6016b76
MC
6798 return 0;
6799}
6800
5d5d0015
MC
6801static int
6802bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6803{
6804 struct bnx2 *bp = netdev_priv(dev);
6805 int rc;
6806
6807 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6808 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6809 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6810
6811 return -EINVAL;
6812 }
6813 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6814 return rc;
6815}
6816
b6016b76
MC
6817static void
6818bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6819{
972ec0d4 6820 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6821
6822 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6823 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6824 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6825}
6826
6827static int
6828bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6829{
972ec0d4 6830 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6831
6832 bp->req_flow_ctrl = 0;
6833 if (epause->rx_pause)
6834 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6835 if (epause->tx_pause)
6836 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6837
6838 if (epause->autoneg) {
6839 bp->autoneg |= AUTONEG_FLOW_CTRL;
6840 }
6841 else {
6842 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6843 }
6844
9f52b564
MC
6845 if (netif_running(dev)) {
6846 spin_lock_bh(&bp->phy_lock);
6847 bnx2_setup_phy(bp, bp->phy_port);
6848 spin_unlock_bh(&bp->phy_lock);
6849 }
b6016b76
MC
6850
6851 return 0;
6852}
6853
/* ethtool get_rx_csum handler: report the rx checksum offload flag. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
6861
/* ethtool set_rx_csum handler.  Only the driver flag is updated here;
 * the rx completion path presumably consults bp->rx_csum (not visible
 * in this function).
 */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
6870
b11d6213
MC
/* ethtool set_tso handler: toggle the TSO feature bits.  TSO over
 * IPv6 is additionally enabled on the 5709.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
6885
cea94db9 6886#define BNX2_NUM_STATS 46
b6016b76 6887
14ab9b86 6888static struct {
b6016b76
MC
6889 char string[ETH_GSTRING_LEN];
6890} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6891 { "rx_bytes" },
6892 { "rx_error_bytes" },
6893 { "tx_bytes" },
6894 { "tx_error_bytes" },
6895 { "rx_ucast_packets" },
6896 { "rx_mcast_packets" },
6897 { "rx_bcast_packets" },
6898 { "tx_ucast_packets" },
6899 { "tx_mcast_packets" },
6900 { "tx_bcast_packets" },
6901 { "tx_mac_errors" },
6902 { "tx_carrier_errors" },
6903 { "rx_crc_errors" },
6904 { "rx_align_errors" },
6905 { "tx_single_collisions" },
6906 { "tx_multi_collisions" },
6907 { "tx_deferred" },
6908 { "tx_excess_collisions" },
6909 { "tx_late_collisions" },
6910 { "tx_total_collisions" },
6911 { "rx_fragments" },
6912 { "rx_jabbers" },
6913 { "rx_undersize_packets" },
6914 { "rx_oversize_packets" },
6915 { "rx_64_byte_packets" },
6916 { "rx_65_to_127_byte_packets" },
6917 { "rx_128_to_255_byte_packets" },
6918 { "rx_256_to_511_byte_packets" },
6919 { "rx_512_to_1023_byte_packets" },
6920 { "rx_1024_to_1522_byte_packets" },
6921 { "rx_1523_to_9022_byte_packets" },
6922 { "tx_64_byte_packets" },
6923 { "tx_65_to_127_byte_packets" },
6924 { "tx_128_to_255_byte_packets" },
6925 { "tx_256_to_511_byte_packets" },
6926 { "tx_512_to_1023_byte_packets" },
6927 { "tx_1024_to_1522_byte_packets" },
6928 { "tx_1523_to_9022_byte_packets" },
6929 { "rx_xon_frames" },
6930 { "rx_xoff_frames" },
6931 { "tx_xon_frames" },
6932 { "tx_xoff_frames" },
6933 { "rx_mac_ctrl_frames" },
6934 { "rx_filtered_packets" },
6935 { "rx_discards" },
cea94db9 6936 { "rx_fw_discards" },
b6016b76
MC
6937};
6938
/* 32-bit word offset of a counter within struct statistics_block. */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each ethtool -S counter in the statistics block;
 * same order as bnx2_stats_str_arr.  64-bit counters point at their
 * _hi word (the _lo word follows it).
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
6989
/* Per-counter width in bytes (8 = 64-bit, 4 = 32-bit, 0 = counter
 * not implemented and reported as zero).
 * stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Later chips implement carrier-sense errors as well. */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
7008
b6016b76
MC
/* Number of self-tests reported by ethtool; must match the names
 * below and the buf[] slots filled in bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7021
7022static int
b9f2c044 7023bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7024{
b9f2c044
JG
7025 switch (sset) {
7026 case ETH_SS_TEST:
7027 return BNX2_NUM_TESTS;
7028 case ETH_SS_STATS:
7029 return BNX2_NUM_STATS;
7030 default:
7031 return -EOPNOTSUPP;
7032 }
b6016b76
MC
7033}
7034
/* ethtool self_test handler.  Offline tests (registers, memory,
 * loopback) reset the chip and disrupt traffic; online tests (NVRAM,
 * interrupt, link) run against the live device.  A non-zero entry in
 * buf[] marks the corresponding test (see bnx2_tests_str_arr) failed.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Power the chip up in case the device is down. */
	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore the previous state of the interface. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Return to D3hot if the device was down when we started. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7093
7094static void
7095bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7096{
7097 switch (stringset) {
7098 case ETH_SS_STATS:
7099 memcpy(buf, bnx2_stats_str_arr,
7100 sizeof(bnx2_stats_str_arr));
7101 break;
7102 case ETH_SS_TEST:
7103 memcpy(buf, bnx2_tests_str_arr,
7104 sizeof(bnx2_tests_str_arr));
7105 break;
7106 }
7107}
7108
b6016b76
MC
/* ethtool get_ethtool_stats handler.  Copies each counter out of the
 * chip's statistics block using bnx2_stats_offset_arr; the per-chip
 * length table says whether each counter is 8 bytes, 4 bytes, or
 * unimplemented (0, reported as zero).
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No stats block yet: report all zeroes. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* Early revisions lack some counters (see length tables). */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: hi word at the offset, lo word next. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
			*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
7149
/* ethtool phys_id handler: blink the port LED so the adapter can be
 * located.  data is the blink duration in seconds (0 means 2); the
 * LED toggles every 500ms and the saved LED mode is restored at the
 * end.  Interruptible by a signal.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	/* Power the chip up in case the device is down. */
	bnx2_set_power_state(bp, PCI_D0);

	if (data == 0)
		data = 2;

	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);

	/* Return to D3hot if the device is down. */
	if (!netif_running(dev))
		bnx2_set_power_state(bp, PCI_D3hot);

	return 0;
}
7189
4666f87a
MC
7190static int
7191bnx2_set_tx_csum(struct net_device *dev, u32 data)
7192{
7193 struct bnx2 *bp = netdev_priv(dev);
7194
7195 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 7196 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
7197 else
7198 return (ethtool_op_set_tx_csum(dev, data));
7199}
7200
7282d491 7201static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
7202 .get_settings = bnx2_get_settings,
7203 .set_settings = bnx2_set_settings,
7204 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
7205 .get_regs_len = bnx2_get_regs_len,
7206 .get_regs = bnx2_get_regs,
b6016b76
MC
7207 .get_wol = bnx2_get_wol,
7208 .set_wol = bnx2_set_wol,
7209 .nway_reset = bnx2_nway_reset,
7210 .get_link = ethtool_op_get_link,
7211 .get_eeprom_len = bnx2_get_eeprom_len,
7212 .get_eeprom = bnx2_get_eeprom,
7213 .set_eeprom = bnx2_set_eeprom,
7214 .get_coalesce = bnx2_get_coalesce,
7215 .set_coalesce = bnx2_set_coalesce,
7216 .get_ringparam = bnx2_get_ringparam,
7217 .set_ringparam = bnx2_set_ringparam,
7218 .get_pauseparam = bnx2_get_pauseparam,
7219 .set_pauseparam = bnx2_set_pauseparam,
7220 .get_rx_csum = bnx2_get_rx_csum,
7221 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 7222 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 7223 .set_sg = ethtool_op_set_sg,
b11d6213 7224 .set_tso = bnx2_set_tso,
b6016b76
MC
7225 .self_test = bnx2_self_test,
7226 .get_strings = bnx2_get_strings,
7227 .phys_id = bnx2_phys_id,
b6016b76 7228 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 7229 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
7230};
7231
/* ndo_do_ioctl handler for the MII ioctls.  Called with rtnl_lock
 * held.  Remote-PHY devices do not expose MII registers, and the
 * device must be up so the PHY is powered.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers requires admin privilege. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7285
7286/* Called with rtnl_lock */
7287static int
7288bnx2_change_mac_addr(struct net_device *dev, void *p)
7289{
7290 struct sockaddr *addr = p;
972ec0d4 7291 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7292
73eef4cd
MC
7293 if (!is_valid_ether_addr(addr->sa_data))
7294 return -EINVAL;
7295
b6016b76
MC
7296 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7297 if (netif_running(dev))
5fcaed01 7298 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7299
7300 return 0;
7301}
7302
7303/* Called with rtnl_lock */
7304static int
7305bnx2_change_mtu(struct net_device *dev, int new_mtu)
7306{
972ec0d4 7307 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7308
7309 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7310 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7311 return -EINVAL;
7312
7313 dev->mtu = new_mtu;
5d5d0015 7314 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7315}
7316
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll entry point: service every IRQ vector with interrupts masked
 * so netconsole/kgdboe can make progress without real interrupts.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int vec;

	for (vec = 0; vec < bp->irq_nvecs; vec++) {
		disable_irq(bp->irq_tbl[vec].vector);
		bnx2_interrupt(bp->irq_tbl[vec].vector, &bp->bnx2_napi[vec]);
		enable_irq(bp->irq_tbl[vec].vector);
	}
}
#endif
7331
253c8b75
MC
7332static void __devinit
7333bnx2_get_5709_media(struct bnx2 *bp)
7334{
7335 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7336 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7337 u32 strap;
7338
7339 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7340 return;
7341 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
583c28e5 7342 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7343 return;
7344 }
7345
7346 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7347 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7348 else
7349 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7350
7351 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7352 switch (strap) {
7353 case 0x4:
7354 case 0x5:
7355 case 0x6:
583c28e5 7356 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7357 return;
7358 }
7359 } else {
7360 switch (strap) {
7361 case 0x1:
7362 case 0x2:
7363 case 0x4:
583c28e5 7364 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7365 return;
7366 }
7367 }
7368}
7369
883e5151
MC
7370static void __devinit
7371bnx2_get_pci_speed(struct bnx2 *bp)
7372{
7373 u32 reg;
7374
7375 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7376 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7377 u32 clkreg;
7378
f86e82fb 7379 bp->flags |= BNX2_FLAG_PCIX;
883e5151
MC
7380
7381 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7382
7383 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7384 switch (clkreg) {
7385 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7386 bp->bus_speed_mhz = 133;
7387 break;
7388
7389 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7390 bp->bus_speed_mhz = 100;
7391 break;
7392
7393 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7394 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7395 bp->bus_speed_mhz = 66;
7396 break;
7397
7398 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7399 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7400 bp->bus_speed_mhz = 50;
7401 break;
7402
7403 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7404 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7405 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7406 bp->bus_speed_mhz = 33;
7407 break;
7408 }
7409 }
7410 else {
7411 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7412 bp->bus_speed_mhz = 66;
7413 else
7414 bp->bus_speed_mhz = 33;
7415 }
7416
7417 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
f86e82fb 7418 bp->flags |= BNX2_FLAG_PCI_32BIT;
883e5151
MC
7419
7420}
7421
b6016b76
MC
7422static int __devinit
7423bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7424{
7425 struct bnx2 *bp;
7426 unsigned long mem_len;
58fc2ea4 7427 int rc, i, j;
b6016b76 7428 u32 reg;
40453c83 7429 u64 dma_mask, persist_dma_mask;
b6016b76 7430
b6016b76 7431 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7432 bp = netdev_priv(dev);
b6016b76
MC
7433
7434 bp->flags = 0;
7435 bp->phy_flags = 0;
7436
7437 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7438 rc = pci_enable_device(pdev);
7439 if (rc) {
898eb71c 7440 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
7441 goto err_out;
7442 }
7443
7444 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7445 dev_err(&pdev->dev,
2e8a538d 7446 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
7447 rc = -ENODEV;
7448 goto err_out_disable;
7449 }
7450
7451 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7452 if (rc) {
9b91cf9d 7453 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
7454 goto err_out_disable;
7455 }
7456
7457 pci_set_master(pdev);
6ff2da49 7458 pci_save_state(pdev);
b6016b76
MC
7459
7460 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7461 if (bp->pm_cap == 0) {
9b91cf9d 7462 dev_err(&pdev->dev,
2e8a538d 7463 "Cannot find power management capability, aborting.\n");
b6016b76
MC
7464 rc = -EIO;
7465 goto err_out_release;
7466 }
7467
b6016b76
MC
7468 bp->dev = dev;
7469 bp->pdev = pdev;
7470
7471 spin_lock_init(&bp->phy_lock);
1b8227c4 7472 spin_lock_init(&bp->indirect_lock);
c4028958 7473 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7474
7475 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
706bf240 7476 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
b6016b76
MC
7477 dev->mem_end = dev->mem_start + mem_len;
7478 dev->irq = pdev->irq;
7479
7480 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7481
7482 if (!bp->regview) {
9b91cf9d 7483 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
7484 rc = -ENOMEM;
7485 goto err_out_release;
7486 }
7487
7488 /* Configure byte swap and enable write to the reg_window registers.
7489 * Rely on CPU to do target byte swapping on big endian systems
7490 * The chip's target access swapping will not swap all accesses
7491 */
7492 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7493 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7494 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7495
829ca9a3 7496 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7497
7498 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7499
883e5151
MC
7500 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7501 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7502 dev_err(&pdev->dev,
7503 "Cannot find PCIE capability, aborting.\n");
7504 rc = -EIO;
7505 goto err_out_unmap;
7506 }
f86e82fb 7507 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7508 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7509 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
883e5151 7510 } else {
59b47d8a
MC
7511 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7512 if (bp->pcix_cap == 0) {
7513 dev_err(&pdev->dev,
7514 "Cannot find PCIX capability, aborting.\n");
7515 rc = -EIO;
7516 goto err_out_unmap;
7517 }
7518 }
7519
b4b36042
MC
7520 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7521 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7522 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7523 }
7524
8e6a72c4
MC
7525 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7526 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7527 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7528 }
7529
40453c83
MC
7530 /* 5708 cannot support DMA addresses > 40-bit. */
7531 if (CHIP_NUM(bp) == CHIP_NUM_5708)
50cf156a 7532 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 7533 else
6a35528a 7534 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
7535
7536 /* Configure DMA attributes. */
7537 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7538 dev->features |= NETIF_F_HIGHDMA;
7539 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7540 if (rc) {
7541 dev_err(&pdev->dev,
7542 "pci_set_consistent_dma_mask failed, aborting.\n");
7543 goto err_out_unmap;
7544 }
284901a9 7545 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
40453c83
MC
7546 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7547 goto err_out_unmap;
7548 }
7549
f86e82fb 7550 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 7551 bnx2_get_pci_speed(bp);
b6016b76
MC
7552
7553 /* 5706A0 may falsely detect SERR and PERR. */
7554 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7555 reg = REG_RD(bp, PCI_COMMAND);
7556 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7557 REG_WR(bp, PCI_COMMAND, reg);
7558 }
7559 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 7560 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 7561
9b91cf9d 7562 dev_err(&pdev->dev,
2e8a538d 7563 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
7564 goto err_out_unmap;
7565 }
7566
7567 bnx2_init_nvram(bp);
7568
2726d6e1 7569 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
7570
7571 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
7572 BNX2_SHM_HDR_SIGNATURE_SIG) {
7573 u32 off = PCI_FUNC(pdev->devfn) << 2;
7574
2726d6e1 7575 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 7576 } else
e3648b3d
MC
7577 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7578
b6016b76
MC
7579 /* Get the permanent MAC address. First we need to make sure the
7580 * firmware is actually running.
7581 */
2726d6e1 7582 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
7583
7584 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7585 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 7586 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
7587 rc = -ENODEV;
7588 goto err_out_unmap;
7589 }
7590
2726d6e1 7591 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
58fc2ea4
MC
7592 for (i = 0, j = 0; i < 3; i++) {
7593 u8 num, k, skip0;
7594
7595 num = (u8) (reg >> (24 - (i * 8)));
7596 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7597 if (num >= k || !skip0 || k == 1) {
7598 bp->fw_version[j++] = (num / k) + '0';
7599 skip0 = 0;
7600 }
7601 }
7602 if (i != 2)
7603 bp->fw_version[j++] = '.';
7604 }
2726d6e1 7605 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
7606 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7607 bp->wol = 1;
7608
7609 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 7610 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
7611
7612 for (i = 0; i < 30; i++) {
2726d6e1 7613 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
7614 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7615 break;
7616 msleep(10);
7617 }
7618 }
2726d6e1 7619 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
7620 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7621 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7622 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 7623 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4
MC
7624
7625 bp->fw_version[j++] = ' ';
7626 for (i = 0; i < 3; i++) {
2726d6e1 7627 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
58fc2ea4
MC
7628 reg = swab32(reg);
7629 memcpy(&bp->fw_version[j], &reg, 4);
7630 j += 4;
7631 }
7632 }
b6016b76 7633
2726d6e1 7634 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
7635 bp->mac_addr[0] = (u8) (reg >> 8);
7636 bp->mac_addr[1] = (u8) reg;
7637
2726d6e1 7638 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
7639 bp->mac_addr[2] = (u8) (reg >> 24);
7640 bp->mac_addr[3] = (u8) (reg >> 16);
7641 bp->mac_addr[4] = (u8) (reg >> 8);
7642 bp->mac_addr[5] = (u8) reg;
7643
7644 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 7645 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
7646
7647 bp->rx_csum = 1;
7648
b6016b76
MC
7649 bp->tx_quick_cons_trip_int = 20;
7650 bp->tx_quick_cons_trip = 20;
7651 bp->tx_ticks_int = 80;
7652 bp->tx_ticks = 80;
6aa20a22 7653
b6016b76
MC
7654 bp->rx_quick_cons_trip_int = 6;
7655 bp->rx_quick_cons_trip = 6;
7656 bp->rx_ticks_int = 18;
7657 bp->rx_ticks = 18;
7658
7ea6920e 7659 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 7660
ac392abc 7661 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 7662
5b0c76ad
MC
7663 bp->phy_addr = 1;
7664
b6016b76 7665 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
7666 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7667 bnx2_get_5709_media(bp);
7668 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 7669 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 7670
0d8a6571 7671 bp->phy_port = PORT_TP;
583c28e5 7672 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 7673 bp->phy_port = PORT_FIBRE;
2726d6e1 7674 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 7675 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 7676 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7677 bp->wol = 0;
7678 }
38ea3686
MC
7679 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7680 /* Don't do parallel detect on this board because of
7681 * some board problems. The link will not go down
7682 * if we do parallel detect.
7683 */
7684 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7685 pdev->subsystem_device == 0x310c)
7686 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7687 } else {
5b0c76ad 7688 bp->phy_addr = 2;
5b0c76ad 7689 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 7690 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 7691 }
261dd5ca
MC
7692 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7693 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 7694 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
7695 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7696 (CHIP_REV(bp) == CHIP_REV_Ax ||
7697 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 7698 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 7699
7c62e83b
MC
7700 bnx2_init_fw_cap(bp);
7701
16088272
MC
7702 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7703 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
7704 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7705 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 7706 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7707 bp->wol = 0;
7708 }
dda1e390 7709
b6016b76
MC
7710 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7711 bp->tx_quick_cons_trip_int =
7712 bp->tx_quick_cons_trip;
7713 bp->tx_ticks_int = bp->tx_ticks;
7714 bp->rx_quick_cons_trip_int =
7715 bp->rx_quick_cons_trip;
7716 bp->rx_ticks_int = bp->rx_ticks;
7717 bp->comp_prod_trip_int = bp->comp_prod_trip;
7718 bp->com_ticks_int = bp->com_ticks;
7719 bp->cmd_ticks_int = bp->cmd_ticks;
7720 }
7721
f9317a40
MC
7722 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7723 *
7724 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7725 * with byte enables disabled on the unused 32-bit word. This is legal
7726 * but causes problems on the AMD 8132 which will eventually stop
7727 * responding after a while.
7728 *
7729 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7730 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7731 */
7732 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7733 struct pci_dev *amd_8132 = NULL;
7734
7735 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7736 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7737 amd_8132))) {
f9317a40 7738
44c10138
AK
7739 if (amd_8132->revision >= 0x10 &&
7740 amd_8132->revision <= 0x13) {
f9317a40
MC
7741 disable_msi = 1;
7742 pci_dev_put(amd_8132);
7743 break;
7744 }
7745 }
7746 }
7747
deaf391b 7748 bnx2_set_default_link(bp);
b6016b76
MC
7749 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7750
cd339a0e 7751 init_timer(&bp->timer);
ac392abc 7752 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
7753 bp->timer.data = (unsigned long) bp;
7754 bp->timer.function = bnx2_timer;
7755
b6016b76
MC
7756 return 0;
7757
7758err_out_unmap:
7759 if (bp->regview) {
7760 iounmap(bp->regview);
73eef4cd 7761 bp->regview = NULL;
b6016b76
MC
7762 }
7763
7764err_out_release:
7765 pci_release_regions(pdev);
7766
7767err_out_disable:
7768 pci_disable_device(pdev);
7769 pci_set_drvdata(pdev, NULL);
7770
7771err_out:
7772 return rc;
7773}
7774
883e5151
MC
7775static char * __devinit
7776bnx2_bus_string(struct bnx2 *bp, char *str)
7777{
7778 char *s = str;
7779
f86e82fb 7780 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
7781 s += sprintf(s, "PCI Express");
7782 } else {
7783 s += sprintf(s, "PCI");
f86e82fb 7784 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 7785 s += sprintf(s, "-X");
f86e82fb 7786 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
7787 s += sprintf(s, " 32-bit");
7788 else
7789 s += sprintf(s, " 64-bit");
7790 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7791 }
7792 return str;
7793}
7794
2ba582b7 7795static void __devinit
35efa7c1
MC
7796bnx2_init_napi(struct bnx2 *bp)
7797{
b4b36042 7798 int i;
35efa7c1 7799
b4b36042 7800 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
35e9010b
MC
7801 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7802 int (*poll)(struct napi_struct *, int);
7803
7804 if (i == 0)
7805 poll = bnx2_poll;
7806 else
f0ea2e63 7807 poll = bnx2_poll_msix;
35e9010b
MC
7808
7809 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
7810 bnapi->bp = bp;
7811 }
35efa7c1
MC
7812}
7813
0421eae6
SH
7814static const struct net_device_ops bnx2_netdev_ops = {
7815 .ndo_open = bnx2_open,
7816 .ndo_start_xmit = bnx2_start_xmit,
7817 .ndo_stop = bnx2_close,
7818 .ndo_get_stats = bnx2_get_stats,
7819 .ndo_set_rx_mode = bnx2_set_rx_mode,
7820 .ndo_do_ioctl = bnx2_ioctl,
7821 .ndo_validate_addr = eth_validate_addr,
7822 .ndo_set_mac_address = bnx2_change_mac_addr,
7823 .ndo_change_mtu = bnx2_change_mtu,
7824 .ndo_tx_timeout = bnx2_tx_timeout,
7825#ifdef BCM_VLAN
7826 .ndo_vlan_rx_register = bnx2_vlan_rx_register,
7827#endif
7828#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7829 .ndo_poll_controller = poll_bnx2,
7830#endif
7831};
7832
b6016b76
MC
7833static int __devinit
7834bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
7835{
7836 static int version_printed = 0;
7837 struct net_device *dev = NULL;
7838 struct bnx2 *bp;
0795af57 7839 int rc;
883e5151 7840 char str[40];
b6016b76
MC
7841
7842 if (version_printed++ == 0)
7843 printk(KERN_INFO "%s", version);
7844
7845 /* dev zeroed in init_etherdev */
706bf240 7846 dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);
b6016b76
MC
7847
7848 if (!dev)
7849 return -ENOMEM;
7850
7851 rc = bnx2_init_board(pdev, dev);
7852 if (rc < 0) {
7853 free_netdev(dev);
7854 return rc;
7855 }
7856
0421eae6 7857 dev->netdev_ops = &bnx2_netdev_ops;
b6016b76 7858 dev->watchdog_timeo = TX_TIMEOUT;
b6016b76 7859 dev->ethtool_ops = &bnx2_ethtool_ops;
b6016b76 7860
972ec0d4 7861 bp = netdev_priv(dev);
35efa7c1 7862 bnx2_init_napi(bp);
b6016b76 7863
1b2f922f
MC
7864 pci_set_drvdata(pdev, dev);
7865
57579f76
MC
7866 rc = bnx2_request_firmware(bp);
7867 if (rc)
7868 goto error;
7869
1b2f922f
MC
7870 memcpy(dev->dev_addr, bp->mac_addr, 6);
7871 memcpy(dev->perm_addr, bp->mac_addr, 6);
1b2f922f 7872
d212f87b 7873 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
4666f87a 7874 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d212f87b
SH
7875 dev->features |= NETIF_F_IPV6_CSUM;
7876
1b2f922f
MC
7877#ifdef BCM_VLAN
7878 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7879#endif
7880 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
7881 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7882 dev->features |= NETIF_F_TSO6;
1b2f922f 7883
b6016b76 7884 if ((rc = register_netdev(dev))) {
9b91cf9d 7885 dev_err(&pdev->dev, "Cannot register net device\n");
57579f76 7886 goto error;
b6016b76
MC
7887 }
7888
883e5151 7889 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
e174961c 7890 "IRQ %d, node addr %pM\n",
b6016b76 7891 dev->name,
fbbf68b7 7892 board_info[ent->driver_data].name,
b6016b76
MC
7893 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
7894 ((CHIP_ID(bp) & 0x0ff0) >> 4),
883e5151 7895 bnx2_bus_string(bp, str),
b6016b76 7896 dev->base_addr,
e174961c 7897 bp->pdev->irq, dev->dev_addr);
b6016b76 7898
b6016b76 7899 return 0;
57579f76
MC
7900
7901error:
7902 if (bp->mips_firmware)
7903 release_firmware(bp->mips_firmware);
7904 if (bp->rv2p_firmware)
7905 release_firmware(bp->rv2p_firmware);
7906
7907 if (bp->regview)
7908 iounmap(bp->regview);
7909 pci_release_regions(pdev);
7910 pci_disable_device(pdev);
7911 pci_set_drvdata(pdev, NULL);
7912 free_netdev(dev);
7913 return rc;
b6016b76
MC
7914}
7915
7916static void __devexit
7917bnx2_remove_one(struct pci_dev *pdev)
7918{
7919 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7920 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7921
afdc08b9
MC
7922 flush_scheduled_work();
7923
b6016b76
MC
7924 unregister_netdev(dev);
7925
57579f76
MC
7926 if (bp->mips_firmware)
7927 release_firmware(bp->mips_firmware);
7928 if (bp->rv2p_firmware)
7929 release_firmware(bp->rv2p_firmware);
7930
b6016b76
MC
7931 if (bp->regview)
7932 iounmap(bp->regview);
7933
7934 free_netdev(dev);
7935 pci_release_regions(pdev);
7936 pci_disable_device(pdev);
7937 pci_set_drvdata(pdev, NULL);
7938}
7939
7940static int
829ca9a3 7941bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
7942{
7943 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7944 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7945
6caebb02
MC
7946 /* PCI register 4 needs to be saved whether netif_running() or not.
7947 * MSI address and data need to be saved if using MSI and
7948 * netif_running().
7949 */
7950 pci_save_state(pdev);
b6016b76
MC
7951 if (!netif_running(dev))
7952 return 0;
7953
1d60290f 7954 flush_scheduled_work();
b6016b76
MC
7955 bnx2_netif_stop(bp);
7956 netif_device_detach(dev);
7957 del_timer_sync(&bp->timer);
74bf4ba3 7958 bnx2_shutdown_chip(bp);
b6016b76 7959 bnx2_free_skbs(bp);
829ca9a3 7960 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
7961 return 0;
7962}
7963
7964static int
7965bnx2_resume(struct pci_dev *pdev)
7966{
7967 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 7968 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7969
6caebb02 7970 pci_restore_state(pdev);
b6016b76
MC
7971 if (!netif_running(dev))
7972 return 0;
7973
829ca9a3 7974 bnx2_set_power_state(bp, PCI_D0);
b6016b76 7975 netif_device_attach(dev);
9a120bc5 7976 bnx2_init_nic(bp, 1);
b6016b76
MC
7977 bnx2_netif_start(bp);
7978 return 0;
7979}
7980
6ff2da49
WX
7981/**
7982 * bnx2_io_error_detected - called when PCI error is detected
7983 * @pdev: Pointer to PCI device
7984 * @state: The current pci connection state
7985 *
7986 * This function is called after a PCI bus error affecting
7987 * this device has been detected.
7988 */
7989static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
7990 pci_channel_state_t state)
7991{
7992 struct net_device *dev = pci_get_drvdata(pdev);
7993 struct bnx2 *bp = netdev_priv(dev);
7994
7995 rtnl_lock();
7996 netif_device_detach(dev);
7997
7998 if (netif_running(dev)) {
7999 bnx2_netif_stop(bp);
8000 del_timer_sync(&bp->timer);
8001 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8002 }
8003
8004 pci_disable_device(pdev);
8005 rtnl_unlock();
8006
8007 /* Request a slot slot reset. */
8008 return PCI_ERS_RESULT_NEED_RESET;
8009}
8010
8011/**
8012 * bnx2_io_slot_reset - called after the pci bus has been reset.
8013 * @pdev: Pointer to PCI device
8014 *
8015 * Restart the card from scratch, as if from a cold-boot.
8016 */
8017static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8018{
8019 struct net_device *dev = pci_get_drvdata(pdev);
8020 struct bnx2 *bp = netdev_priv(dev);
8021
8022 rtnl_lock();
8023 if (pci_enable_device(pdev)) {
8024 dev_err(&pdev->dev,
8025 "Cannot re-enable PCI device after reset.\n");
8026 rtnl_unlock();
8027 return PCI_ERS_RESULT_DISCONNECT;
8028 }
8029 pci_set_master(pdev);
8030 pci_restore_state(pdev);
8031
8032 if (netif_running(dev)) {
8033 bnx2_set_power_state(bp, PCI_D0);
8034 bnx2_init_nic(bp, 1);
8035 }
8036
8037 rtnl_unlock();
8038 return PCI_ERS_RESULT_RECOVERED;
8039}
8040
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
8060
8061static struct pci_error_handlers bnx2_err_handler = {
8062 .error_detected = bnx2_io_error_detected,
8063 .slot_reset = bnx2_io_slot_reset,
8064 .resume = bnx2_io_resume,
8065};
8066
b6016b76 8067static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
8068 .name = DRV_MODULE_NAME,
8069 .id_table = bnx2_pci_tbl,
8070 .probe = bnx2_init_one,
8071 .remove = __devexit_p(bnx2_remove_one),
8072 .suspend = bnx2_suspend,
8073 .resume = bnx2_resume,
6ff2da49 8074 .err_handler = &bnx2_err_handler,
b6016b76
MC
8075};
8076
8077static int __init bnx2_init(void)
8078{
29917620 8079 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
8080}
8081
8082static void __exit bnx2_cleanup(void)
8083{
8084 pci_unregister_driver(&bnx2_pci_driver);
8085}
8086
8087module_init(bnx2_init);
8088module_exit(bnx2_cleanup);
8089
8090
8091