/* drivers/net/bnx2.c — Broadcom NetXtreme II (BCM5706/5708) network driver.
 * (Stray git-blame viewer header removed during source reconstruction.)
 */
1/* bnx2.c: Broadcom NX2 network driver.
2 *
206cc83c 3 * Copyright (c) 2004, 2005, 2006 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
42#ifdef NETIF_F_TSO
43#include <net/ip.h>
44#include <net/tcp.h>
45#include <net/checksum.h>
46#define BCM_TSO 1
47#endif
48#include <linux/workqueue.h>
49#include <linux/crc32.h>
50#include <linux/prefetch.h>
29b12174 51#include <linux/cache.h>
fba9fe91 52#include <linux/zlib.h>
f2a4f052 53
b6016b76
MC
54#include "bnx2.h"
55#include "bnx2_fw.h"
56
57#define DRV_MODULE_NAME "bnx2"
58#define PFX DRV_MODULE_NAME ": "
f9317a40
MC
59#define DRV_MODULE_VERSION "1.4.45"
60#define DRV_MODULE_RELDATE "September 29, 2006"
b6016b76
MC
61
62#define RUN_AT(x) (jiffies + (x))
63
64/* Time in jiffies before concluding the transmitter is hung. */
65#define TX_TIMEOUT (5*HZ)
66
e19360f2 67static const char version[] __devinitdata =
b6016b76
MC
68 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
69
70MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 71MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
72MODULE_LICENSE("GPL");
73MODULE_VERSION(DRV_MODULE_VERSION);
74
75static int disable_msi = 0;
76
77module_param(disable_msi, int, 0);
78MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
79
80typedef enum {
81 BCM5706 = 0,
82 NC370T,
83 NC370I,
84 BCM5706S,
85 NC370F,
5b0c76ad
MC
86 BCM5708,
87 BCM5708S,
b6016b76
MC
88} board_t;
89
90/* indexed by board_t, above */
f71e1309 91static const struct {
b6016b76
MC
92 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
b6016b76
MC
101 };
102
103static struct pci_device_id bnx2_pci_tbl[] = {
104 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
105 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
113 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
b6016b76
MC
118 { 0, }
119};
120
121static struct flash_spec flash_table[] =
122{
123 /* Slow EEPROM */
37137709 124 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
b6016b76
MC
125 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
126 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
127 "EEPROM - slow"},
37137709
MC
128 /* Expansion entry 0001 */
129 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76 130 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
131 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
132 "Entry 0001"},
b6016b76
MC
133 /* Saifun SA25F010 (non-buffered flash) */
134 /* strap, cfg1, & write1 need updates */
37137709 135 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
137 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
138 "Non-buffered flash (128kB)"},
139 /* Saifun SA25F020 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
37137709 141 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
144 "Non-buffered flash (256kB)"},
37137709
MC
145 /* Expansion entry 0100 */
146 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
147 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
148 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
149 "Entry 0100"},
150 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 151 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
37137709
MC
152 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
153 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
154 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
155 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
156 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
157 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
158 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
159 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
160 /* Saifun SA25F005 (non-buffered flash) */
161 /* strap, cfg1, & write1 need updates */
162 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
163 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
164 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
165 "Non-buffered flash (64kB)"},
166 /* Fast EEPROM */
167 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
168 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
169 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
170 "EEPROM - fast"},
171 /* Expansion entry 1001 */
172 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
173 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
174 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
175 "Entry 1001"},
176 /* Expansion entry 1010 */
177 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
178 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
179 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
180 "Entry 1010"},
181 /* ATMEL AT45DB011B (buffered flash) */
182 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
183 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
184 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
185 "Buffered flash (128kB)"},
186 /* Expansion entry 1100 */
187 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
188 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
189 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
190 "Entry 1100"},
191 /* Expansion entry 1101 */
192 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
193 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
194 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
195 "Entry 1101"},
196 /* Ateml Expansion entry 1110 */
197 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
198 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
199 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
200 "Entry 1110 (Atmel)"},
201 /* ATMEL AT45DB021B (buffered flash) */
202 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
203 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
204 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
205 "Buffered flash (256kB)"},
b6016b76
MC
206};
207
208MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
209
e89bbf10
MC
210static inline u32 bnx2_tx_avail(struct bnx2 *bp)
211{
2f8af120 212 u32 diff;
e89bbf10 213
2f8af120
MC
214 smp_mb();
215 diff = TX_RING_IDX(bp->tx_prod) - TX_RING_IDX(bp->tx_cons);
e89bbf10
MC
216 if (diff > MAX_TX_DESC_CNT)
217 diff = (diff & MAX_TX_DESC_CNT) - 1;
218 return (bp->tx_ring_size - diff);
219}
220
b6016b76
MC
221static u32
222bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
223{
224 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
225 return (REG_RD(bp, BNX2_PCICFG_REG_WINDOW));
226}
227
228static void
229bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
230{
231 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
232 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
233}
234
235static void
236bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
237{
238 offset += cid_addr;
239 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
240 REG_WR(bp, BNX2_CTX_DATA, val);
241}
242
243static int
244bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
245{
246 u32 val1;
247 int i, ret;
248
249 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
250 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
251 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
252
253 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
254 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
255
256 udelay(40);
257 }
258
259 val1 = (bp->phy_addr << 21) | (reg << 16) |
260 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
261 BNX2_EMAC_MDIO_COMM_START_BUSY;
262 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
263
264 for (i = 0; i < 50; i++) {
265 udelay(10);
266
267 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
268 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
269 udelay(5);
270
271 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
272 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
273
274 break;
275 }
276 }
277
278 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
279 *val = 0x0;
280 ret = -EBUSY;
281 }
282 else {
283 *val = val1;
284 ret = 0;
285 }
286
287 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
288 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
289 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
290
291 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
292 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
293
294 udelay(40);
295 }
296
297 return ret;
298}
299
300static int
301bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
302{
303 u32 val1;
304 int i, ret;
305
306 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
307 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
308 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
309
310 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
311 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
312
313 udelay(40);
314 }
315
316 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
317 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
318 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
319 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 320
b6016b76
MC
321 for (i = 0; i < 50; i++) {
322 udelay(10);
323
324 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
325 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
326 udelay(5);
327 break;
328 }
329 }
330
331 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
332 ret = -EBUSY;
333 else
334 ret = 0;
335
336 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
338 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
339
340 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
341 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
342
343 udelay(40);
344 }
345
346 return ret;
347}
348
349static void
350bnx2_disable_int(struct bnx2 *bp)
351{
352 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
353 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
354 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
355}
356
357static void
358bnx2_enable_int(struct bnx2 *bp)
359{
1269a8a6
MC
360 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
361 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
362 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
363
b6016b76
MC
364 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
365 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
366
bf5295bb 367 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
368}
369
370static void
371bnx2_disable_int_sync(struct bnx2 *bp)
372{
373 atomic_inc(&bp->intr_sem);
374 bnx2_disable_int(bp);
375 synchronize_irq(bp->pdev->irq);
376}
377
378static void
379bnx2_netif_stop(struct bnx2 *bp)
380{
381 bnx2_disable_int_sync(bp);
382 if (netif_running(bp->dev)) {
383 netif_poll_disable(bp->dev);
384 netif_tx_disable(bp->dev);
385 bp->dev->trans_start = jiffies; /* prevent tx timeout */
386 }
387}
388
389static void
390bnx2_netif_start(struct bnx2 *bp)
391{
392 if (atomic_dec_and_test(&bp->intr_sem)) {
393 if (netif_running(bp->dev)) {
394 netif_wake_queue(bp->dev);
395 netif_poll_enable(bp->dev);
396 bnx2_enable_int(bp);
397 }
398 }
399}
400
401static void
402bnx2_free_mem(struct bnx2 *bp)
403{
13daffa2
MC
404 int i;
405
b6016b76 406 if (bp->status_blk) {
0f31f994 407 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
408 bp->status_blk, bp->status_blk_mapping);
409 bp->status_blk = NULL;
0f31f994 410 bp->stats_blk = NULL;
b6016b76
MC
411 }
412 if (bp->tx_desc_ring) {
413 pci_free_consistent(bp->pdev,
414 sizeof(struct tx_bd) * TX_DESC_CNT,
415 bp->tx_desc_ring, bp->tx_desc_mapping);
416 bp->tx_desc_ring = NULL;
417 }
b4558ea9
JJ
418 kfree(bp->tx_buf_ring);
419 bp->tx_buf_ring = NULL;
13daffa2
MC
420 for (i = 0; i < bp->rx_max_ring; i++) {
421 if (bp->rx_desc_ring[i])
422 pci_free_consistent(bp->pdev,
423 sizeof(struct rx_bd) * RX_DESC_CNT,
424 bp->rx_desc_ring[i],
425 bp->rx_desc_mapping[i]);
426 bp->rx_desc_ring[i] = NULL;
427 }
428 vfree(bp->rx_buf_ring);
b4558ea9 429 bp->rx_buf_ring = NULL;
b6016b76
MC
430}
431
432static int
433bnx2_alloc_mem(struct bnx2 *bp)
434{
0f31f994 435 int i, status_blk_size;
13daffa2 436
0f31f994
MC
437 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
438 GFP_KERNEL);
b6016b76
MC
439 if (bp->tx_buf_ring == NULL)
440 return -ENOMEM;
441
b6016b76
MC
442 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
443 sizeof(struct tx_bd) *
444 TX_DESC_CNT,
445 &bp->tx_desc_mapping);
446 if (bp->tx_desc_ring == NULL)
447 goto alloc_mem_err;
448
13daffa2
MC
449 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
450 bp->rx_max_ring);
b6016b76
MC
451 if (bp->rx_buf_ring == NULL)
452 goto alloc_mem_err;
453
13daffa2
MC
454 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
455 bp->rx_max_ring);
456
457 for (i = 0; i < bp->rx_max_ring; i++) {
458 bp->rx_desc_ring[i] =
459 pci_alloc_consistent(bp->pdev,
460 sizeof(struct rx_bd) * RX_DESC_CNT,
461 &bp->rx_desc_mapping[i]);
462 if (bp->rx_desc_ring[i] == NULL)
463 goto alloc_mem_err;
464
465 }
b6016b76 466
0f31f994
MC
467 /* Combine status and statistics blocks into one allocation. */
468 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
469 bp->status_stats_size = status_blk_size +
470 sizeof(struct statistics_block);
471
472 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
473 &bp->status_blk_mapping);
474 if (bp->status_blk == NULL)
475 goto alloc_mem_err;
476
0f31f994 477 memset(bp->status_blk, 0, bp->status_stats_size);
b6016b76 478
0f31f994
MC
479 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
480 status_blk_size);
b6016b76 481
0f31f994 482 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76
MC
483
484 return 0;
485
486alloc_mem_err:
487 bnx2_free_mem(bp);
488 return -ENOMEM;
489}
490
e3648b3d
MC
491static void
492bnx2_report_fw_link(struct bnx2 *bp)
493{
494 u32 fw_link_status = 0;
495
496 if (bp->link_up) {
497 u32 bmsr;
498
499 switch (bp->line_speed) {
500 case SPEED_10:
501 if (bp->duplex == DUPLEX_HALF)
502 fw_link_status = BNX2_LINK_STATUS_10HALF;
503 else
504 fw_link_status = BNX2_LINK_STATUS_10FULL;
505 break;
506 case SPEED_100:
507 if (bp->duplex == DUPLEX_HALF)
508 fw_link_status = BNX2_LINK_STATUS_100HALF;
509 else
510 fw_link_status = BNX2_LINK_STATUS_100FULL;
511 break;
512 case SPEED_1000:
513 if (bp->duplex == DUPLEX_HALF)
514 fw_link_status = BNX2_LINK_STATUS_1000HALF;
515 else
516 fw_link_status = BNX2_LINK_STATUS_1000FULL;
517 break;
518 case SPEED_2500:
519 if (bp->duplex == DUPLEX_HALF)
520 fw_link_status = BNX2_LINK_STATUS_2500HALF;
521 else
522 fw_link_status = BNX2_LINK_STATUS_2500FULL;
523 break;
524 }
525
526 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
527
528 if (bp->autoneg) {
529 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
530
531 bnx2_read_phy(bp, MII_BMSR, &bmsr);
532 bnx2_read_phy(bp, MII_BMSR, &bmsr);
533
534 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
535 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
536 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
537 else
538 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
539 }
540 }
541 else
542 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
543
544 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
545}
546
b6016b76
MC
547static void
548bnx2_report_link(struct bnx2 *bp)
549{
550 if (bp->link_up) {
551 netif_carrier_on(bp->dev);
552 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
553
554 printk("%d Mbps ", bp->line_speed);
555
556 if (bp->duplex == DUPLEX_FULL)
557 printk("full duplex");
558 else
559 printk("half duplex");
560
561 if (bp->flow_ctrl) {
562 if (bp->flow_ctrl & FLOW_CTRL_RX) {
563 printk(", receive ");
564 if (bp->flow_ctrl & FLOW_CTRL_TX)
565 printk("& transmit ");
566 }
567 else {
568 printk(", transmit ");
569 }
570 printk("flow control ON");
571 }
572 printk("\n");
573 }
574 else {
575 netif_carrier_off(bp->dev);
576 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
577 }
e3648b3d
MC
578
579 bnx2_report_fw_link(bp);
b6016b76
MC
580}
581
582static void
583bnx2_resolve_flow_ctrl(struct bnx2 *bp)
584{
585 u32 local_adv, remote_adv;
586
587 bp->flow_ctrl = 0;
6aa20a22 588 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
589 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
590
591 if (bp->duplex == DUPLEX_FULL) {
592 bp->flow_ctrl = bp->req_flow_ctrl;
593 }
594 return;
595 }
596
597 if (bp->duplex != DUPLEX_FULL) {
598 return;
599 }
600
5b0c76ad
MC
601 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
602 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
603 u32 val;
604
605 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
606 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
607 bp->flow_ctrl |= FLOW_CTRL_TX;
608 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
609 bp->flow_ctrl |= FLOW_CTRL_RX;
610 return;
611 }
612
b6016b76
MC
613 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
614 bnx2_read_phy(bp, MII_LPA, &remote_adv);
615
616 if (bp->phy_flags & PHY_SERDES_FLAG) {
617 u32 new_local_adv = 0;
618 u32 new_remote_adv = 0;
619
620 if (local_adv & ADVERTISE_1000XPAUSE)
621 new_local_adv |= ADVERTISE_PAUSE_CAP;
622 if (local_adv & ADVERTISE_1000XPSE_ASYM)
623 new_local_adv |= ADVERTISE_PAUSE_ASYM;
624 if (remote_adv & ADVERTISE_1000XPAUSE)
625 new_remote_adv |= ADVERTISE_PAUSE_CAP;
626 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
627 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
628
629 local_adv = new_local_adv;
630 remote_adv = new_remote_adv;
631 }
632
633 /* See Table 28B-3 of 802.3ab-1999 spec. */
634 if (local_adv & ADVERTISE_PAUSE_CAP) {
635 if(local_adv & ADVERTISE_PAUSE_ASYM) {
636 if (remote_adv & ADVERTISE_PAUSE_CAP) {
637 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
638 }
639 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
640 bp->flow_ctrl = FLOW_CTRL_RX;
641 }
642 }
643 else {
644 if (remote_adv & ADVERTISE_PAUSE_CAP) {
645 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
646 }
647 }
648 }
649 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
650 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
651 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
652
653 bp->flow_ctrl = FLOW_CTRL_TX;
654 }
655 }
656}
657
658static int
5b0c76ad
MC
659bnx2_5708s_linkup(struct bnx2 *bp)
660{
661 u32 val;
662
663 bp->link_up = 1;
664 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
665 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
666 case BCM5708S_1000X_STAT1_SPEED_10:
667 bp->line_speed = SPEED_10;
668 break;
669 case BCM5708S_1000X_STAT1_SPEED_100:
670 bp->line_speed = SPEED_100;
671 break;
672 case BCM5708S_1000X_STAT1_SPEED_1G:
673 bp->line_speed = SPEED_1000;
674 break;
675 case BCM5708S_1000X_STAT1_SPEED_2G5:
676 bp->line_speed = SPEED_2500;
677 break;
678 }
679 if (val & BCM5708S_1000X_STAT1_FD)
680 bp->duplex = DUPLEX_FULL;
681 else
682 bp->duplex = DUPLEX_HALF;
683
684 return 0;
685}
686
687static int
688bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
689{
690 u32 bmcr, local_adv, remote_adv, common;
691
692 bp->link_up = 1;
693 bp->line_speed = SPEED_1000;
694
695 bnx2_read_phy(bp, MII_BMCR, &bmcr);
696 if (bmcr & BMCR_FULLDPLX) {
697 bp->duplex = DUPLEX_FULL;
698 }
699 else {
700 bp->duplex = DUPLEX_HALF;
701 }
702
703 if (!(bmcr & BMCR_ANENABLE)) {
704 return 0;
705 }
706
707 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
708 bnx2_read_phy(bp, MII_LPA, &remote_adv);
709
710 common = local_adv & remote_adv;
711 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
712
713 if (common & ADVERTISE_1000XFULL) {
714 bp->duplex = DUPLEX_FULL;
715 }
716 else {
717 bp->duplex = DUPLEX_HALF;
718 }
719 }
720
721 return 0;
722}
723
724static int
725bnx2_copper_linkup(struct bnx2 *bp)
726{
727 u32 bmcr;
728
729 bnx2_read_phy(bp, MII_BMCR, &bmcr);
730 if (bmcr & BMCR_ANENABLE) {
731 u32 local_adv, remote_adv, common;
732
733 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
734 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
735
736 common = local_adv & (remote_adv >> 2);
737 if (common & ADVERTISE_1000FULL) {
738 bp->line_speed = SPEED_1000;
739 bp->duplex = DUPLEX_FULL;
740 }
741 else if (common & ADVERTISE_1000HALF) {
742 bp->line_speed = SPEED_1000;
743 bp->duplex = DUPLEX_HALF;
744 }
745 else {
746 bnx2_read_phy(bp, MII_ADVERTISE, &local_adv);
747 bnx2_read_phy(bp, MII_LPA, &remote_adv);
748
749 common = local_adv & remote_adv;
750 if (common & ADVERTISE_100FULL) {
751 bp->line_speed = SPEED_100;
752 bp->duplex = DUPLEX_FULL;
753 }
754 else if (common & ADVERTISE_100HALF) {
755 bp->line_speed = SPEED_100;
756 bp->duplex = DUPLEX_HALF;
757 }
758 else if (common & ADVERTISE_10FULL) {
759 bp->line_speed = SPEED_10;
760 bp->duplex = DUPLEX_FULL;
761 }
762 else if (common & ADVERTISE_10HALF) {
763 bp->line_speed = SPEED_10;
764 bp->duplex = DUPLEX_HALF;
765 }
766 else {
767 bp->line_speed = 0;
768 bp->link_up = 0;
769 }
770 }
771 }
772 else {
773 if (bmcr & BMCR_SPEED100) {
774 bp->line_speed = SPEED_100;
775 }
776 else {
777 bp->line_speed = SPEED_10;
778 }
779 if (bmcr & BMCR_FULLDPLX) {
780 bp->duplex = DUPLEX_FULL;
781 }
782 else {
783 bp->duplex = DUPLEX_HALF;
784 }
785 }
786
787 return 0;
788}
789
790static int
791bnx2_set_mac_link(struct bnx2 *bp)
792{
793 u32 val;
794
795 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
796 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
797 (bp->duplex == DUPLEX_HALF)) {
798 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
799 }
800
801 /* Configure the EMAC mode register. */
802 val = REG_RD(bp, BNX2_EMAC_MODE);
803
804 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad
MC
805 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
806 BNX2_EMAC_MODE_25G);
b6016b76
MC
807
808 if (bp->link_up) {
5b0c76ad
MC
809 switch (bp->line_speed) {
810 case SPEED_10:
811 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
812 val |= BNX2_EMAC_MODE_PORT_MII_10;
813 break;
814 }
815 /* fall through */
816 case SPEED_100:
817 val |= BNX2_EMAC_MODE_PORT_MII;
818 break;
819 case SPEED_2500:
820 val |= BNX2_EMAC_MODE_25G;
821 /* fall through */
822 case SPEED_1000:
823 val |= BNX2_EMAC_MODE_PORT_GMII;
824 break;
825 }
b6016b76
MC
826 }
827 else {
828 val |= BNX2_EMAC_MODE_PORT_GMII;
829 }
830
831 /* Set the MAC to operate in the appropriate duplex mode. */
832 if (bp->duplex == DUPLEX_HALF)
833 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
834 REG_WR(bp, BNX2_EMAC_MODE, val);
835
836 /* Enable/disable rx PAUSE. */
837 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
838
839 if (bp->flow_ctrl & FLOW_CTRL_RX)
840 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
841 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
842
843 /* Enable/disable tx PAUSE. */
844 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
845 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
846
847 if (bp->flow_ctrl & FLOW_CTRL_TX)
848 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
849 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
850
851 /* Acknowledge the interrupt. */
852 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
853
854 return 0;
855}
856
857static int
858bnx2_set_link(struct bnx2 *bp)
859{
860 u32 bmsr;
861 u8 link_up;
862
80be4434 863 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
864 bp->link_up = 1;
865 return 0;
866 }
867
868 link_up = bp->link_up;
869
870 bnx2_read_phy(bp, MII_BMSR, &bmsr);
871 bnx2_read_phy(bp, MII_BMSR, &bmsr);
872
873 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
874 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
875 u32 val;
876
877 val = REG_RD(bp, BNX2_EMAC_STATUS);
878 if (val & BNX2_EMAC_STATUS_LINK)
879 bmsr |= BMSR_LSTATUS;
880 else
881 bmsr &= ~BMSR_LSTATUS;
882 }
883
884 if (bmsr & BMSR_LSTATUS) {
885 bp->link_up = 1;
886
887 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
888 if (CHIP_NUM(bp) == CHIP_NUM_5706)
889 bnx2_5706s_linkup(bp);
890 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
891 bnx2_5708s_linkup(bp);
b6016b76
MC
892 }
893 else {
894 bnx2_copper_linkup(bp);
895 }
896 bnx2_resolve_flow_ctrl(bp);
897 }
898 else {
899 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
900 (bp->autoneg & AUTONEG_SPEED)) {
901
902 u32 bmcr;
903
904 bnx2_read_phy(bp, MII_BMCR, &bmcr);
80be4434 905 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
b6016b76
MC
906 if (!(bmcr & BMCR_ANENABLE)) {
907 bnx2_write_phy(bp, MII_BMCR, bmcr |
908 BMCR_ANENABLE);
909 }
910 }
911 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
912 bp->link_up = 0;
913 }
914
915 if (bp->link_up != link_up) {
916 bnx2_report_link(bp);
917 }
918
919 bnx2_set_mac_link(bp);
920
921 return 0;
922}
923
924static int
925bnx2_reset_phy(struct bnx2 *bp)
926{
927 int i;
928 u32 reg;
929
930 bnx2_write_phy(bp, MII_BMCR, BMCR_RESET);
931
932#define PHY_RESET_MAX_WAIT 100
933 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
934 udelay(10);
935
936 bnx2_read_phy(bp, MII_BMCR, &reg);
937 if (!(reg & BMCR_RESET)) {
938 udelay(20);
939 break;
940 }
941 }
942 if (i == PHY_RESET_MAX_WAIT) {
943 return -EBUSY;
944 }
945 return 0;
946}
947
948static u32
949bnx2_phy_get_pause_adv(struct bnx2 *bp)
950{
951 u32 adv = 0;
952
953 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
954 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
955
956 if (bp->phy_flags & PHY_SERDES_FLAG) {
957 adv = ADVERTISE_1000XPAUSE;
958 }
959 else {
960 adv = ADVERTISE_PAUSE_CAP;
961 }
962 }
963 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
964 if (bp->phy_flags & PHY_SERDES_FLAG) {
965 adv = ADVERTISE_1000XPSE_ASYM;
966 }
967 else {
968 adv = ADVERTISE_PAUSE_ASYM;
969 }
970 }
971 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
972 if (bp->phy_flags & PHY_SERDES_FLAG) {
973 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
974 }
975 else {
976 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
977 }
978 }
979 return adv;
980}
981
982static int
983bnx2_setup_serdes_phy(struct bnx2 *bp)
984{
5b0c76ad 985 u32 adv, bmcr, up1;
b6016b76
MC
986 u32 new_adv = 0;
987
988 if (!(bp->autoneg & AUTONEG_SPEED)) {
989 u32 new_bmcr;
5b0c76ad
MC
990 int force_link_down = 0;
991
80be4434
MC
992 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
993 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
994
995 bnx2_read_phy(bp, MII_BMCR, &bmcr);
996 new_bmcr = bmcr & ~(BMCR_ANENABLE | BCM5708S_BMCR_FORCE_2500);
997 new_bmcr |= BMCR_SPEED1000;
998 if (bp->req_line_speed == SPEED_2500) {
999 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1000 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1001 if (!(up1 & BCM5708S_UP1_2G5)) {
1002 up1 |= BCM5708S_UP1_2G5;
1003 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1004 force_link_down = 1;
1005 }
1006 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5b0c76ad
MC
1007 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1008 if (up1 & BCM5708S_UP1_2G5) {
1009 up1 &= ~BCM5708S_UP1_2G5;
1010 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1011 force_link_down = 1;
1012 }
1013 }
1014
b6016b76 1015 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1016 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1017 new_bmcr |= BMCR_FULLDPLX;
1018 }
1019 else {
5b0c76ad 1020 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1021 new_bmcr &= ~BMCR_FULLDPLX;
1022 }
5b0c76ad 1023 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1024 /* Force a link down visible on the other side */
1025 if (bp->link_up) {
5b0c76ad
MC
1026 bnx2_write_phy(bp, MII_ADVERTISE, adv &
1027 ~(ADVERTISE_1000XFULL |
1028 ADVERTISE_1000XHALF));
b6016b76
MC
1029 bnx2_write_phy(bp, MII_BMCR, bmcr |
1030 BMCR_ANRESTART | BMCR_ANENABLE);
1031
1032 bp->link_up = 0;
1033 netif_carrier_off(bp->dev);
5b0c76ad 1034 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
80be4434 1035 bnx2_report_link(bp);
b6016b76 1036 }
5b0c76ad 1037 bnx2_write_phy(bp, MII_ADVERTISE, adv);
b6016b76
MC
1038 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1039 }
1040 return 0;
1041 }
1042
5b0c76ad
MC
1043 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1044 bnx2_read_phy(bp, BCM5708S_UP1, &up1);
1045 up1 |= BCM5708S_UP1_2G5;
1046 bnx2_write_phy(bp, BCM5708S_UP1, up1);
1047 }
1048
b6016b76
MC
1049 if (bp->advertising & ADVERTISED_1000baseT_Full)
1050 new_adv |= ADVERTISE_1000XFULL;
1051
1052 new_adv |= bnx2_phy_get_pause_adv(bp);
1053
1054 bnx2_read_phy(bp, MII_ADVERTISE, &adv);
1055 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1056
1057 bp->serdes_an_pending = 0;
1058 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1059 /* Force a link down visible on the other side */
1060 if (bp->link_up) {
b6016b76 1061 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
80be4434
MC
1062 spin_unlock_bh(&bp->phy_lock);
1063 msleep(20);
1064 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1065 }
1066
1067 bnx2_write_phy(bp, MII_ADVERTISE, new_adv);
1068 bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART |
1069 BMCR_ANENABLE);
cd339a0e
MC
1070 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1071 /* Speed up link-up time when the link partner
1072 * does not autonegotiate which is very common
1073 * in blade servers. Some blade servers use
1074 * IPMI for kerboard input and it's important
1075 * to minimize link disruptions. Autoneg. involves
1076 * exchanging base pages plus 3 next pages and
1077 * normally completes in about 120 msec.
1078 */
1079 bp->current_interval = SERDES_AN_TIMEOUT;
1080 bp->serdes_an_pending = 1;
1081 mod_timer(&bp->timer, jiffies + bp->current_interval);
1082 }
b6016b76
MC
1083 }
1084
1085 return 0;
1086}
1087
1088#define ETHTOOL_ALL_FIBRE_SPEED \
1089 (ADVERTISED_1000baseT_Full)
1090
1091#define ETHTOOL_ALL_COPPER_SPEED \
1092 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1093 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1094 ADVERTISED_1000baseT_Full)
1095
1096#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1097 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1098
b6016b76
MC
1099#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1100
1101static int
1102bnx2_setup_copper_phy(struct bnx2 *bp)
1103{
1104 u32 bmcr;
1105 u32 new_bmcr;
1106
1107 bnx2_read_phy(bp, MII_BMCR, &bmcr);
1108
1109 if (bp->autoneg & AUTONEG_SPEED) {
1110 u32 adv_reg, adv1000_reg;
1111 u32 new_adv_reg = 0;
1112 u32 new_adv1000_reg = 0;
1113
1114 bnx2_read_phy(bp, MII_ADVERTISE, &adv_reg);
1115 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1116 ADVERTISE_PAUSE_ASYM);
1117
1118 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1119 adv1000_reg &= PHY_ALL_1000_SPEED;
1120
1121 if (bp->advertising & ADVERTISED_10baseT_Half)
1122 new_adv_reg |= ADVERTISE_10HALF;
1123 if (bp->advertising & ADVERTISED_10baseT_Full)
1124 new_adv_reg |= ADVERTISE_10FULL;
1125 if (bp->advertising & ADVERTISED_100baseT_Half)
1126 new_adv_reg |= ADVERTISE_100HALF;
1127 if (bp->advertising & ADVERTISED_100baseT_Full)
1128 new_adv_reg |= ADVERTISE_100FULL;
1129 if (bp->advertising & ADVERTISED_1000baseT_Full)
1130 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1131
b6016b76
MC
1132 new_adv_reg |= ADVERTISE_CSMA;
1133
1134 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1135
1136 if ((adv1000_reg != new_adv1000_reg) ||
1137 (adv_reg != new_adv_reg) ||
1138 ((bmcr & BMCR_ANENABLE) == 0)) {
1139
1140 bnx2_write_phy(bp, MII_ADVERTISE, new_adv_reg);
1141 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
1142 bnx2_write_phy(bp, MII_BMCR, BMCR_ANRESTART |
1143 BMCR_ANENABLE);
1144 }
1145 else if (bp->link_up) {
1146 /* Flow ctrl may have changed from auto to forced */
1147 /* or vice-versa. */
1148
1149 bnx2_resolve_flow_ctrl(bp);
1150 bnx2_set_mac_link(bp);
1151 }
1152 return 0;
1153 }
1154
1155 new_bmcr = 0;
1156 if (bp->req_line_speed == SPEED_100) {
1157 new_bmcr |= BMCR_SPEED100;
1158 }
1159 if (bp->req_duplex == DUPLEX_FULL) {
1160 new_bmcr |= BMCR_FULLDPLX;
1161 }
1162 if (new_bmcr != bmcr) {
1163 u32 bmsr;
1164 int i = 0;
1165
1166 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1167 bnx2_read_phy(bp, MII_BMSR, &bmsr);
6aa20a22 1168
b6016b76
MC
1169 if (bmsr & BMSR_LSTATUS) {
1170 /* Force link down */
1171 bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
1172 do {
1173 udelay(100);
1174 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1175 bnx2_read_phy(bp, MII_BMSR, &bmsr);
1176 i++;
1177 } while ((bmsr & BMSR_LSTATUS) && (i < 620));
1178 }
1179
1180 bnx2_write_phy(bp, MII_BMCR, new_bmcr);
1181
1182 /* Normally, the new speed is setup after the link has
1183 * gone down and up again. In some cases, link will not go
1184 * down so we need to set up the new speed here.
1185 */
1186 if (bmsr & BMSR_LSTATUS) {
1187 bp->line_speed = bp->req_line_speed;
1188 bp->duplex = bp->req_duplex;
1189 bnx2_resolve_flow_ctrl(bp);
1190 bnx2_set_mac_link(bp);
1191 }
1192 }
1193 return 0;
1194}
1195
1196static int
1197bnx2_setup_phy(struct bnx2 *bp)
1198{
1199 if (bp->loopback == MAC_LOOPBACK)
1200 return 0;
1201
1202 if (bp->phy_flags & PHY_SERDES_FLAG) {
1203 return (bnx2_setup_serdes_phy(bp));
1204 }
1205 else {
1206 return (bnx2_setup_copper_phy(bp));
1207 }
1208}
1209
1210static int
5b0c76ad
MC
1211bnx2_init_5708s_phy(struct bnx2 *bp)
1212{
1213 u32 val;
1214
1215 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
1216 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
1217 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1218
1219 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
1220 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
1221 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
1222
1223 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
1224 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
1225 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
1226
1227 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
1228 bnx2_read_phy(bp, BCM5708S_UP1, &val);
1229 val |= BCM5708S_UP1_2G5;
1230 bnx2_write_phy(bp, BCM5708S_UP1, val);
1231 }
1232
1233 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
1234 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
1235 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
1236 /* increase tx signal amplitude */
1237 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1238 BCM5708S_BLK_ADDR_TX_MISC);
1239 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
1240 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
1241 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
1242 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
1243 }
1244
e3648b3d 1245 val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
1246 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
1247
1248 if (val) {
1249 u32 is_backplane;
1250
e3648b3d 1251 is_backplane = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
1252 BNX2_SHARED_HW_CFG_CONFIG);
1253 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
1254 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1255 BCM5708S_BLK_ADDR_TX_MISC);
1256 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
1257 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
1258 BCM5708S_BLK_ADDR_DIG);
1259 }
1260 }
1261 return 0;
1262}
1263
1264static int
1265bnx2_init_5706s_phy(struct bnx2 *bp)
b6016b76
MC
1266{
1267 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
1268
1269 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
1270 REG_WR(bp, BNX2_MISC_UNUSED0, 0x300);
1271 }
1272
1273 if (bp->dev->mtu > 1500) {
1274 u32 val;
1275
1276 /* Set extended packet length bit */
1277 bnx2_write_phy(bp, 0x18, 0x7);
1278 bnx2_read_phy(bp, 0x18, &val);
1279 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
1280
1281 bnx2_write_phy(bp, 0x1c, 0x6c00);
1282 bnx2_read_phy(bp, 0x1c, &val);
1283 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
1284 }
1285 else {
1286 u32 val;
1287
1288 bnx2_write_phy(bp, 0x18, 0x7);
1289 bnx2_read_phy(bp, 0x18, &val);
1290 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1291
1292 bnx2_write_phy(bp, 0x1c, 0x6c00);
1293 bnx2_read_phy(bp, 0x1c, &val);
1294 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
1295 }
1296
1297 return 0;
1298}
1299
1300static int
1301bnx2_init_copper_phy(struct bnx2 *bp)
1302{
5b0c76ad
MC
1303 u32 val;
1304
b6016b76
MC
1305 bp->phy_flags |= PHY_CRC_FIX_FLAG;
1306
1307 if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
1308 bnx2_write_phy(bp, 0x18, 0x0c00);
1309 bnx2_write_phy(bp, 0x17, 0x000a);
1310 bnx2_write_phy(bp, 0x15, 0x310b);
1311 bnx2_write_phy(bp, 0x17, 0x201f);
1312 bnx2_write_phy(bp, 0x15, 0x9506);
1313 bnx2_write_phy(bp, 0x17, 0x401f);
1314 bnx2_write_phy(bp, 0x15, 0x14e2);
1315 bnx2_write_phy(bp, 0x18, 0x0400);
1316 }
1317
1318 if (bp->dev->mtu > 1500) {
b6016b76
MC
1319 /* Set extended packet length bit */
1320 bnx2_write_phy(bp, 0x18, 0x7);
1321 bnx2_read_phy(bp, 0x18, &val);
1322 bnx2_write_phy(bp, 0x18, val | 0x4000);
1323
1324 bnx2_read_phy(bp, 0x10, &val);
1325 bnx2_write_phy(bp, 0x10, val | 0x1);
1326 }
1327 else {
b6016b76
MC
1328 bnx2_write_phy(bp, 0x18, 0x7);
1329 bnx2_read_phy(bp, 0x18, &val);
1330 bnx2_write_phy(bp, 0x18, val & ~0x4007);
1331
1332 bnx2_read_phy(bp, 0x10, &val);
1333 bnx2_write_phy(bp, 0x10, val & ~0x1);
1334 }
1335
5b0c76ad
MC
1336 /* ethernet@wirespeed */
1337 bnx2_write_phy(bp, 0x18, 0x7007);
1338 bnx2_read_phy(bp, 0x18, &val);
1339 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
1340 return 0;
1341}
1342
1343
1344static int
1345bnx2_init_phy(struct bnx2 *bp)
1346{
1347 u32 val;
1348 int rc = 0;
1349
1350 bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
1351 bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;
1352
1353 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
1354
1355 bnx2_reset_phy(bp);
1356
1357 bnx2_read_phy(bp, MII_PHYSID1, &val);
1358 bp->phy_id = val << 16;
1359 bnx2_read_phy(bp, MII_PHYSID2, &val);
1360 bp->phy_id |= val & 0xffff;
1361
1362 if (bp->phy_flags & PHY_SERDES_FLAG) {
5b0c76ad
MC
1363 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1364 rc = bnx2_init_5706s_phy(bp);
1365 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1366 rc = bnx2_init_5708s_phy(bp);
b6016b76
MC
1367 }
1368 else {
1369 rc = bnx2_init_copper_phy(bp);
1370 }
1371
1372 bnx2_setup_phy(bp);
1373
1374 return rc;
1375}
1376
1377static int
1378bnx2_set_mac_loopback(struct bnx2 *bp)
1379{
1380 u32 mac_mode;
1381
1382 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1383 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1384 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1385 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1386 bp->link_up = 1;
1387 return 0;
1388}
1389
bc5a0690
MC
1390static int bnx2_test_link(struct bnx2 *);
1391
1392static int
1393bnx2_set_phy_loopback(struct bnx2 *bp)
1394{
1395 u32 mac_mode;
1396 int rc, i;
1397
1398 spin_lock_bh(&bp->phy_lock);
1399 rc = bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK | BMCR_FULLDPLX |
1400 BMCR_SPEED1000);
1401 spin_unlock_bh(&bp->phy_lock);
1402 if (rc)
1403 return rc;
1404
1405 for (i = 0; i < 10; i++) {
1406 if (bnx2_test_link(bp) == 0)
1407 break;
80be4434 1408 msleep(100);
bc5a0690
MC
1409 }
1410
1411 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1412 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
1413 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
1414 BNX2_EMAC_MODE_25G);
1415
1416 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
1417 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1418 bp->link_up = 1;
1419 return 0;
1420}
1421
b6016b76 1422static int
b090ae2b 1423bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
b6016b76
MC
1424{
1425 int i;
1426 u32 val;
1427
b6016b76
MC
1428 bp->fw_wr_seq++;
1429 msg_data |= bp->fw_wr_seq;
1430
e3648b3d 1431 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76
MC
1432
1433 /* wait for an acknowledgement. */
b090ae2b
MC
1434 for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
1435 msleep(10);
b6016b76 1436
e3648b3d 1437 val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);
b6016b76
MC
1438
1439 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
1440 break;
1441 }
b090ae2b
MC
1442 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
1443 return 0;
b6016b76
MC
1444
1445 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
1446 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
1447 if (!silent)
1448 printk(KERN_ERR PFX "fw sync timeout, reset code = "
1449 "%x\n", msg_data);
b6016b76
MC
1450
1451 msg_data &= ~BNX2_DRV_MSG_CODE;
1452 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
1453
e3648b3d 1454 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);
b6016b76 1455
b6016b76
MC
1456 return -EBUSY;
1457 }
1458
b090ae2b
MC
1459 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
1460 return -EIO;
1461
b6016b76
MC
1462 return 0;
1463}
1464
1465static void
1466bnx2_init_context(struct bnx2 *bp)
1467{
1468 u32 vcid;
1469
1470 vcid = 96;
1471 while (vcid) {
1472 u32 vcid_addr, pcid_addr, offset;
1473
1474 vcid--;
1475
1476 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
1477 u32 new_vcid;
1478
1479 vcid_addr = GET_PCID_ADDR(vcid);
1480 if (vcid & 0x8) {
1481 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
1482 }
1483 else {
1484 new_vcid = vcid;
1485 }
1486 pcid_addr = GET_PCID_ADDR(new_vcid);
1487 }
1488 else {
1489 vcid_addr = GET_CID_ADDR(vcid);
1490 pcid_addr = vcid_addr;
1491 }
1492
1493 REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
1494 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1495
1496 /* Zero out the context. */
1497 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4) {
1498 CTX_WR(bp, 0x00, offset, 0);
1499 }
1500
1501 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
1502 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
1503 }
1504}
1505
1506static int
1507bnx2_alloc_bad_rbuf(struct bnx2 *bp)
1508{
1509 u16 *good_mbuf;
1510 u32 good_mbuf_cnt;
1511 u32 val;
1512
1513 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
1514 if (good_mbuf == NULL) {
1515 printk(KERN_ERR PFX "Failed to allocate memory in "
1516 "bnx2_alloc_bad_rbuf\n");
1517 return -ENOMEM;
1518 }
1519
1520 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
1521 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
1522
1523 good_mbuf_cnt = 0;
1524
1525 /* Allocate a bunch of mbufs and save the good ones in an array. */
1526 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1527 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
1528 REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);
1529
1530 val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);
1531
1532 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
1533
1534 /* The addresses with Bit 9 set are bad memory blocks. */
1535 if (!(val & (1 << 9))) {
1536 good_mbuf[good_mbuf_cnt] = (u16) val;
1537 good_mbuf_cnt++;
1538 }
1539
1540 val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
1541 }
1542
1543 /* Free the good ones back to the mbuf pool thus discarding
1544 * all the bad ones. */
1545 while (good_mbuf_cnt) {
1546 good_mbuf_cnt--;
1547
1548 val = good_mbuf[good_mbuf_cnt];
1549 val = (val << 9) | val | 1;
1550
1551 REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
1552 }
1553 kfree(good_mbuf);
1554 return 0;
1555}
1556
1557static void
6aa20a22 1558bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
1559{
1560 u32 val;
1561 u8 *mac_addr = bp->dev->dev_addr;
1562
1563 val = (mac_addr[0] << 8) | mac_addr[1];
1564
1565 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
1566
6aa20a22 1567 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
1568 (mac_addr[4] << 8) | mac_addr[5];
1569
1570 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
1571}
1572
1573static inline int
1574bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
1575{
1576 struct sk_buff *skb;
1577 struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
1578 dma_addr_t mapping;
13daffa2 1579 struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
b6016b76
MC
1580 unsigned long align;
1581
932f3772 1582 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
b6016b76
MC
1583 if (skb == NULL) {
1584 return -ENOMEM;
1585 }
1586
1587 if (unlikely((align = (unsigned long) skb->data & 0x7))) {
1588 skb_reserve(skb, 8 - align);
1589 }
1590
b6016b76
MC
1591 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
1592 PCI_DMA_FROMDEVICE);
1593
1594 rx_buf->skb = skb;
1595 pci_unmap_addr_set(rx_buf, mapping, mapping);
1596
1597 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
1598 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
1599
1600 bp->rx_prod_bseq += bp->rx_buf_use_size;
1601
1602 return 0;
1603}
1604
1605static void
1606bnx2_phy_int(struct bnx2 *bp)
1607{
1608 u32 new_link_state, old_link_state;
1609
1610 new_link_state = bp->status_blk->status_attn_bits &
1611 STATUS_ATTN_BITS_LINK_STATE;
1612 old_link_state = bp->status_blk->status_attn_bits_ack &
1613 STATUS_ATTN_BITS_LINK_STATE;
1614 if (new_link_state != old_link_state) {
1615 if (new_link_state) {
1616 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD,
1617 STATUS_ATTN_BITS_LINK_STATE);
1618 }
1619 else {
1620 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD,
1621 STATUS_ATTN_BITS_LINK_STATE);
1622 }
1623 bnx2_set_link(bp);
1624 }
1625}
1626
1627static void
1628bnx2_tx_int(struct bnx2 *bp)
1629{
f4e418f7 1630 struct status_block *sblk = bp->status_blk;
b6016b76
MC
1631 u16 hw_cons, sw_cons, sw_ring_cons;
1632 int tx_free_bd = 0;
1633
f4e418f7 1634 hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
b6016b76
MC
1635 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1636 hw_cons++;
1637 }
1638 sw_cons = bp->tx_cons;
1639
1640 while (sw_cons != hw_cons) {
1641 struct sw_bd *tx_buf;
1642 struct sk_buff *skb;
1643 int i, last;
1644
1645 sw_ring_cons = TX_RING_IDX(sw_cons);
1646
1647 tx_buf = &bp->tx_buf_ring[sw_ring_cons];
1648 skb = tx_buf->skb;
6aa20a22 1649#ifdef BCM_TSO
b6016b76 1650 /* partial BD completions possible with TSO packets */
89114afd 1651 if (skb_is_gso(skb)) {
b6016b76
MC
1652 u16 last_idx, last_ring_idx;
1653
1654 last_idx = sw_cons +
1655 skb_shinfo(skb)->nr_frags + 1;
1656 last_ring_idx = sw_ring_cons +
1657 skb_shinfo(skb)->nr_frags + 1;
1658 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
1659 last_idx++;
1660 }
1661 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
1662 break;
1663 }
1664 }
1665#endif
1666 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
1667 skb_headlen(skb), PCI_DMA_TODEVICE);
1668
1669 tx_buf->skb = NULL;
1670 last = skb_shinfo(skb)->nr_frags;
1671
1672 for (i = 0; i < last; i++) {
1673 sw_cons = NEXT_TX_BD(sw_cons);
1674
1675 pci_unmap_page(bp->pdev,
1676 pci_unmap_addr(
1677 &bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
1678 mapping),
1679 skb_shinfo(skb)->frags[i].size,
1680 PCI_DMA_TODEVICE);
1681 }
1682
1683 sw_cons = NEXT_TX_BD(sw_cons);
1684
1685 tx_free_bd += last + 1;
1686
745720e5 1687 dev_kfree_skb(skb);
b6016b76 1688
f4e418f7
MC
1689 hw_cons = bp->hw_tx_cons =
1690 sblk->status_tx_quick_consumer_index0;
1691
b6016b76
MC
1692 if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
1693 hw_cons++;
1694 }
1695 }
1696
e89bbf10 1697 bp->tx_cons = sw_cons;
2f8af120
MC
1698 /* Need to make the tx_cons update visible to bnx2_start_xmit()
1699 * before checking for netif_queue_stopped(). Without the
1700 * memory barrier, there is a small possibility that bnx2_start_xmit()
1701 * will miss it and cause the queue to be stopped forever.
1702 */
1703 smp_mb();
b6016b76 1704
2f8af120
MC
1705 if (unlikely(netif_queue_stopped(bp->dev)) &&
1706 (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
1707 netif_tx_lock(bp->dev);
b6016b76 1708 if ((netif_queue_stopped(bp->dev)) &&
2f8af120 1709 (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
b6016b76 1710 netif_wake_queue(bp->dev);
2f8af120 1711 netif_tx_unlock(bp->dev);
b6016b76 1712 }
b6016b76
MC
1713}
1714
1715static inline void
1716bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
1717 u16 cons, u16 prod)
1718{
236b6394
MC
1719 struct sw_bd *cons_rx_buf, *prod_rx_buf;
1720 struct rx_bd *cons_bd, *prod_bd;
1721
1722 cons_rx_buf = &bp->rx_buf_ring[cons];
1723 prod_rx_buf = &bp->rx_buf_ring[prod];
b6016b76
MC
1724
1725 pci_dma_sync_single_for_device(bp->pdev,
1726 pci_unmap_addr(cons_rx_buf, mapping),
1727 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1728
236b6394 1729 bp->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76 1730
236b6394 1731 prod_rx_buf->skb = skb;
b6016b76 1732
236b6394
MC
1733 if (cons == prod)
1734 return;
b6016b76 1735
236b6394
MC
1736 pci_unmap_addr_set(prod_rx_buf, mapping,
1737 pci_unmap_addr(cons_rx_buf, mapping));
1738
3fdfcc2c
MC
1739 cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
1740 prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
236b6394
MC
1741 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
1742 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
b6016b76
MC
1743}
1744
1745static int
1746bnx2_rx_int(struct bnx2 *bp, int budget)
1747{
f4e418f7 1748 struct status_block *sblk = bp->status_blk;
b6016b76
MC
1749 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
1750 struct l2_fhdr *rx_hdr;
1751 int rx_pkt = 0;
1752
f4e418f7 1753 hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
b6016b76
MC
1754 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
1755 hw_cons++;
1756 }
1757 sw_cons = bp->rx_cons;
1758 sw_prod = bp->rx_prod;
1759
1760 /* Memory barrier necessary as speculative reads of the rx
1761 * buffer can be ahead of the index in the status block
1762 */
1763 rmb();
1764 while (sw_cons != hw_cons) {
1765 unsigned int len;
ade2bfe7 1766 u32 status;
b6016b76
MC
1767 struct sw_bd *rx_buf;
1768 struct sk_buff *skb;
236b6394 1769 dma_addr_t dma_addr;
b6016b76
MC
1770
1771 sw_ring_cons = RX_RING_IDX(sw_cons);
1772 sw_ring_prod = RX_RING_IDX(sw_prod);
1773
1774 rx_buf = &bp->rx_buf_ring[sw_ring_cons];
1775 skb = rx_buf->skb;
236b6394
MC
1776
1777 rx_buf->skb = NULL;
1778
1779 dma_addr = pci_unmap_addr(rx_buf, mapping);
1780
1781 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
b6016b76
MC
1782 bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
1783
1784 rx_hdr = (struct l2_fhdr *) skb->data;
1785 len = rx_hdr->l2_fhdr_pkt_len - 4;
1786
ade2bfe7 1787 if ((status = rx_hdr->l2_fhdr_status) &
b6016b76
MC
1788 (L2_FHDR_ERRORS_BAD_CRC |
1789 L2_FHDR_ERRORS_PHY_DECODE |
1790 L2_FHDR_ERRORS_ALIGNMENT |
1791 L2_FHDR_ERRORS_TOO_SHORT |
1792 L2_FHDR_ERRORS_GIANT_FRAME)) {
1793
1794 goto reuse_rx;
1795 }
1796
1797 /* Since we don't have a jumbo ring, copy small packets
1798 * if mtu > 1500
1799 */
1800 if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
1801 struct sk_buff *new_skb;
1802
932f3772 1803 new_skb = netdev_alloc_skb(bp->dev, len + 2);
b6016b76
MC
1804 if (new_skb == NULL)
1805 goto reuse_rx;
1806
1807 /* aligned copy */
1808 memcpy(new_skb->data,
1809 skb->data + bp->rx_offset - 2,
1810 len + 2);
1811
1812 skb_reserve(new_skb, 2);
1813 skb_put(new_skb, len);
b6016b76
MC
1814
1815 bnx2_reuse_rx_skb(bp, skb,
1816 sw_ring_cons, sw_ring_prod);
1817
1818 skb = new_skb;
1819 }
1820 else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
236b6394 1821 pci_unmap_single(bp->pdev, dma_addr,
b6016b76
MC
1822 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
1823
1824 skb_reserve(skb, bp->rx_offset);
1825 skb_put(skb, len);
1826 }
1827 else {
1828reuse_rx:
1829 bnx2_reuse_rx_skb(bp, skb,
1830 sw_ring_cons, sw_ring_prod);
1831 goto next_rx;
1832 }
1833
1834 skb->protocol = eth_type_trans(skb, bp->dev);
1835
1836 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 1837 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 1838
745720e5 1839 dev_kfree_skb(skb);
b6016b76
MC
1840 goto next_rx;
1841
1842 }
1843
b6016b76
MC
1844 skb->ip_summed = CHECKSUM_NONE;
1845 if (bp->rx_csum &&
1846 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
1847 L2_FHDR_STATUS_UDP_DATAGRAM))) {
1848
ade2bfe7
MC
1849 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
1850 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
1851 skb->ip_summed = CHECKSUM_UNNECESSARY;
1852 }
1853
1854#ifdef BCM_VLAN
1855 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
1856 vlan_hwaccel_receive_skb(skb, bp->vlgrp,
1857 rx_hdr->l2_fhdr_vlan_tag);
1858 }
1859 else
1860#endif
1861 netif_receive_skb(skb);
1862
1863 bp->dev->last_rx = jiffies;
1864 rx_pkt++;
1865
1866next_rx:
b6016b76
MC
1867 sw_cons = NEXT_RX_BD(sw_cons);
1868 sw_prod = NEXT_RX_BD(sw_prod);
1869
1870 if ((rx_pkt == budget))
1871 break;
f4e418f7
MC
1872
1873 /* Refresh hw_cons to see if there is new work */
1874 if (sw_cons == hw_cons) {
1875 hw_cons = bp->hw_rx_cons =
1876 sblk->status_rx_quick_consumer_index0;
1877 if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
1878 hw_cons++;
1879 rmb();
1880 }
b6016b76
MC
1881 }
1882 bp->rx_cons = sw_cons;
1883 bp->rx_prod = sw_prod;
1884
1885 REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);
1886
1887 REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
1888
1889 mmiowb();
1890
1891 return rx_pkt;
1892
1893}
1894
1895/* MSI ISR - The only difference between this and the INTx ISR
1896 * is that the MSI interrupt is always serviced.
1897 */
1898static irqreturn_t
7d12e780 1899bnx2_msi(int irq, void *dev_instance)
b6016b76
MC
1900{
1901 struct net_device *dev = dev_instance;
972ec0d4 1902 struct bnx2 *bp = netdev_priv(dev);
b6016b76 1903
c921e4c4 1904 prefetch(bp->status_blk);
b6016b76
MC
1905 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1906 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1907 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1908
1909 /* Return here if interrupt is disabled. */
73eef4cd
MC
1910 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1911 return IRQ_HANDLED;
b6016b76 1912
73eef4cd 1913 netif_rx_schedule(dev);
b6016b76 1914
73eef4cd 1915 return IRQ_HANDLED;
b6016b76
MC
1916}
1917
1918static irqreturn_t
7d12e780 1919bnx2_interrupt(int irq, void *dev_instance)
b6016b76
MC
1920{
1921 struct net_device *dev = dev_instance;
972ec0d4 1922 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
1923
1924 /* When using INTx, it is possible for the interrupt to arrive
1925 * at the CPU before the status block posted prior to the
1926 * interrupt. Reading a register will flush the status block.
1927 * When using MSI, the MSI message will always complete after
1928 * the status block write.
1929 */
c921e4c4 1930 if ((bp->status_blk->status_idx == bp->last_status_idx) &&
b6016b76
MC
1931 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
1932 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 1933 return IRQ_NONE;
b6016b76
MC
1934
1935 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1936 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
1937 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
1938
1939 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
1940 if (unlikely(atomic_read(&bp->intr_sem) != 0))
1941 return IRQ_HANDLED;
b6016b76 1942
73eef4cd 1943 netif_rx_schedule(dev);
b6016b76 1944
73eef4cd 1945 return IRQ_HANDLED;
b6016b76
MC
1946}
1947
f4e418f7
MC
1948static inline int
1949bnx2_has_work(struct bnx2 *bp)
1950{
1951 struct status_block *sblk = bp->status_blk;
1952
1953 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
1954 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
1955 return 1;
1956
1957 if (((sblk->status_attn_bits & STATUS_ATTN_BITS_LINK_STATE) != 0) !=
1958 bp->link_up)
1959 return 1;
1960
1961 return 0;
1962}
1963
b6016b76
MC
1964static int
1965bnx2_poll(struct net_device *dev, int *budget)
1966{
972ec0d4 1967 struct bnx2 *bp = netdev_priv(dev);
b6016b76 1968
b6016b76
MC
1969 if ((bp->status_blk->status_attn_bits &
1970 STATUS_ATTN_BITS_LINK_STATE) !=
1971 (bp->status_blk->status_attn_bits_ack &
1972 STATUS_ATTN_BITS_LINK_STATE)) {
1973
c770a65c 1974 spin_lock(&bp->phy_lock);
b6016b76 1975 bnx2_phy_int(bp);
c770a65c 1976 spin_unlock(&bp->phy_lock);
bf5295bb
MC
1977
1978 /* This is needed to take care of transient status
1979 * during link changes.
1980 */
1981 REG_WR(bp, BNX2_HC_COMMAND,
1982 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
1983 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76
MC
1984 }
1985
f4e418f7 1986 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
b6016b76 1987 bnx2_tx_int(bp);
b6016b76 1988
f4e418f7 1989 if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
b6016b76
MC
1990 int orig_budget = *budget;
1991 int work_done;
1992
1993 if (orig_budget > dev->quota)
1994 orig_budget = dev->quota;
6aa20a22 1995
b6016b76
MC
1996 work_done = bnx2_rx_int(bp, orig_budget);
1997 *budget -= work_done;
1998 dev->quota -= work_done;
b6016b76 1999 }
6aa20a22 2000
f4e418f7
MC
2001 bp->last_status_idx = bp->status_blk->status_idx;
2002 rmb();
2003
2004 if (!bnx2_has_work(bp)) {
b6016b76 2005 netif_rx_complete(dev);
1269a8a6
MC
2006 if (likely(bp->flags & USING_MSI_FLAG)) {
2007 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2008 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2009 bp->last_status_idx);
2010 return 0;
2011 }
2012 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
2013 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2014 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
2015 bp->last_status_idx);
2016
b6016b76 2017 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
1269a8a6
MC
2018 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
2019 bp->last_status_idx);
b6016b76
MC
2020 return 0;
2021 }
2022
2023 return 1;
2024}
2025
932ff279 2026/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
2027 * from set_multicast.
2028 */
2029static void
2030bnx2_set_rx_mode(struct net_device *dev)
2031{
972ec0d4 2032 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
2033 u32 rx_mode, sort_mode;
2034 int i;
b6016b76 2035
c770a65c 2036 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
2037
2038 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
2039 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
2040 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
2041#ifdef BCM_VLAN
e29054f9 2042 if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
b6016b76 2043 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 2044#else
e29054f9
MC
2045 if (!(bp->flags & ASF_ENABLE_FLAG))
2046 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
2047#endif
2048 if (dev->flags & IFF_PROMISC) {
2049 /* Promiscuous mode. */
2050 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
2051 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
2052 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
2053 }
2054 else if (dev->flags & IFF_ALLMULTI) {
2055 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2056 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2057 0xffffffff);
2058 }
2059 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
2060 }
2061 else {
2062 /* Accept one or more multicast(s). */
2063 struct dev_mc_list *mclist;
2064 u32 mc_filter[NUM_MC_HASH_REGISTERS];
2065 u32 regidx;
2066 u32 bit;
2067 u32 crc;
2068
2069 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
2070
2071 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
2072 i++, mclist = mclist->next) {
2073
2074 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
2075 bit = crc & 0xff;
2076 regidx = (bit & 0xe0) >> 5;
2077 bit &= 0x1f;
2078 mc_filter[regidx] |= (1 << bit);
2079 }
2080
2081 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
2082 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
2083 mc_filter[i]);
2084 }
2085
2086 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
2087 }
2088
2089 if (rx_mode != bp->rx_mode) {
2090 bp->rx_mode = rx_mode;
2091 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
2092 }
2093
2094 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
2095 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
2096 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
2097
c770a65c 2098 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
2099}
2100
fba9fe91
MC
2101#define FW_BUF_SIZE 0x8000
2102
2103static int
2104bnx2_gunzip_init(struct bnx2 *bp)
2105{
2106 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2107 goto gunzip_nomem1;
2108
2109 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2110 goto gunzip_nomem2;
2111
2112 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2113 if (bp->strm->workspace == NULL)
2114 goto gunzip_nomem3;
2115
2116 return 0;
2117
2118gunzip_nomem3:
2119 kfree(bp->strm);
2120 bp->strm = NULL;
2121
2122gunzip_nomem2:
2123 vfree(bp->gunzip_buf);
2124 bp->gunzip_buf = NULL;
2125
2126gunzip_nomem1:
2127 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2128 "uncompression.\n", bp->dev->name);
2129 return -ENOMEM;
2130}
2131
2132static void
2133bnx2_gunzip_end(struct bnx2 *bp)
2134{
2135 kfree(bp->strm->workspace);
2136
2137 kfree(bp->strm);
2138 bp->strm = NULL;
2139
2140 if (bp->gunzip_buf) {
2141 vfree(bp->gunzip_buf);
2142 bp->gunzip_buf = NULL;
2143 }
2144}
2145
2146static int
2147bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2148{
2149 int n, rc;
2150
2151 /* check gzip header */
2152 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2153 return -EINVAL;
2154
2155 n = 10;
2156
2157#define FNAME 0x8
2158 if (zbuf[3] & FNAME)
2159 while ((zbuf[n++] != 0) && (n < len));
2160
2161 bp->strm->next_in = zbuf + n;
2162 bp->strm->avail_in = len - n;
2163 bp->strm->next_out = bp->gunzip_buf;
2164 bp->strm->avail_out = FW_BUF_SIZE;
2165
2166 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2167 if (rc != Z_OK)
2168 return rc;
2169
2170 rc = zlib_inflate(bp->strm, Z_FINISH);
2171
2172 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2173 *outbuf = bp->gunzip_buf;
2174
2175 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2176 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2177 bp->dev->name, bp->strm->msg);
2178
2179 zlib_inflateEnd(bp->strm);
2180
2181 if (rc == Z_STREAM_END)
2182 return 0;
2183
2184 return rc;
2185}
2186
b6016b76
MC
2187static void
2188load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
2189 u32 rv2p_proc)
2190{
2191 int i;
2192 u32 val;
2193
2194
2195 for (i = 0; i < rv2p_code_len; i += 8) {
fba9fe91 2196 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
b6016b76 2197 rv2p_code++;
fba9fe91 2198 REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
b6016b76
MC
2199 rv2p_code++;
2200
2201 if (rv2p_proc == RV2P_PROC1) {
2202 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
2203 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
2204 }
2205 else {
2206 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
2207 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
2208 }
2209 }
2210
2211 /* Reset the processor, un-stall is done later. */
2212 if (rv2p_proc == RV2P_PROC1) {
2213 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
2214 }
2215 else {
2216 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
2217 }
2218}
2219
/* Download one on-chip RISC processor's firmware image and start it.
 *
 * @cpu_reg: register map (mode/state/pc/...) of the target CPU
 * @fw:      firmware sections plus their load and start addresses
 *
 * The CPU is halted first, each section (text, data, sbss, bss,
 * rodata) is copied into the CPU's scratchpad through the indirect
 * register window, then the program counter is set and the halt bit
 * cleared to start execution.
 */
static void
load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
{
	u32 offset;
	u32 val;

	/* Halt the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val |= cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->mode, val);
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);

	/* Load the Text area. */
	/* Section addresses are MIPS virtual addresses; subtracting
	 * mips_view_base converts them to scratchpad offsets. */
	offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
	if (fw->text) {
		int j;

		/* NOTE(review): only the text words go through
		 * cpu_to_le32(); data/sbss/bss/rodata below are written
		 * raw.  Text comes from the gunzip scratch buffer while
		 * the others are static arrays -- confirm the byte order
		 * is intended on big-endian hosts. */
		for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
		}
	}

	/* Load the Data area. */
	offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
	if (fw->data) {
		int j;

		for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->data[j]);
		}
	}

	/* Load the SBSS area. */
	offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
	if (fw->sbss) {
		int j;

		for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->sbss[j]);
		}
	}

	/* Load the BSS area. */
	offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
	if (fw->bss) {
		int j;

		for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->bss[j]);
		}
	}

	/* Load the Read-Only area. */
	offset = cpu_reg->spad_base +
		(fw->rodata_addr - cpu_reg->mips_view_base);
	if (fw->rodata) {
		int j;

		for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
			REG_WR_IND(bp, offset, fw->rodata[j]);
		}
	}

	/* Clear the pre-fetch instruction. */
	REG_WR_IND(bp, cpu_reg->inst, 0);
	REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);

	/* Start the CPU. */
	val = REG_RD_IND(bp, cpu_reg->mode);
	val &= ~cpu_reg->mode_value_halt;
	REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
	REG_WR_IND(bp, cpu_reg->mode, val);
}
2293
/* Load firmware into all of the chip's internal processors: the two
 * RV2P engines plus the RXP, TXP, TPAT and COM RISC CPUs.  The
 * compressed images are gunzipped into a scratch buffer first; each
 * CPU's register map and section layout is filled into local cpu_reg /
 * fw descriptors and handed to load_cpu_fw().
 *
 * Returns 0 on success or a negative errno from the gunzip helpers.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info fw;
	int rc = 0;
	void *text;
	u32 text_len;

	/* Set up the shared zlib stream / scratch buffer. */
	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_RXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_RXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_RXP_b06FwReleaseFix;
	fw.start_addr = bnx2_RXP_b06FwStartAddr;

	fw.text_addr = bnx2_RXP_b06FwTextAddr;
	fw.text_len = bnx2_RXP_b06FwTextLen;
	fw.text_index = 0;

	/* Only the text section is stored compressed. */
	rc = bnx2_gunzip(bp, bnx2_RXP_b06FwText, sizeof(bnx2_RXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_RXP_b06FwDataAddr;
	fw.data_len = bnx2_RXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_RXP_b06FwData;

	fw.sbss_addr = bnx2_RXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_RXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_RXP_b06FwSbss;

	fw.bss_addr = bnx2_RXP_b06FwBssAddr;
	fw.bss_len = bnx2_RXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_RXP_b06FwBss;

	fw.rodata_addr = bnx2_RXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_RXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_RXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TXP_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TXP_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TXP_b06FwReleaseFix;
	fw.start_addr = bnx2_TXP_b06FwStartAddr;

	fw.text_addr = bnx2_TXP_b06FwTextAddr;
	fw.text_len = bnx2_TXP_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TXP_b06FwText, sizeof(bnx2_TXP_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TXP_b06FwDataAddr;
	fw.data_len = bnx2_TXP_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TXP_b06FwData;

	fw.sbss_addr = bnx2_TXP_b06FwSbssAddr;
	fw.sbss_len = bnx2_TXP_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TXP_b06FwSbss;

	fw.bss_addr = bnx2_TXP_b06FwBssAddr;
	fw.bss_len = bnx2_TXP_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TXP_b06FwBss;

	fw.rodata_addr = bnx2_TXP_b06FwRodataAddr;
	fw.rodata_len = bnx2_TXP_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TXP_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_TPAT_b06FwReleaseMajor;
	fw.ver_minor = bnx2_TPAT_b06FwReleaseMinor;
	fw.ver_fix = bnx2_TPAT_b06FwReleaseFix;
	fw.start_addr = bnx2_TPAT_b06FwStartAddr;

	fw.text_addr = bnx2_TPAT_b06FwTextAddr;
	fw.text_len = bnx2_TPAT_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_TPAT_b06FwText, sizeof(bnx2_TPAT_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_TPAT_b06FwDataAddr;
	fw.data_len = bnx2_TPAT_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_TPAT_b06FwData;

	fw.sbss_addr = bnx2_TPAT_b06FwSbssAddr;
	fw.sbss_len = bnx2_TPAT_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_TPAT_b06FwSbss;

	fw.bss_addr = bnx2_TPAT_b06FwBssAddr;
	fw.bss_len = bnx2_TPAT_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_TPAT_b06FwBss;

	fw.rodata_addr = bnx2_TPAT_b06FwRodataAddr;
	fw.rodata_len = bnx2_TPAT_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_TPAT_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	fw.ver_major = bnx2_COM_b06FwReleaseMajor;
	fw.ver_minor = bnx2_COM_b06FwReleaseMinor;
	fw.ver_fix = bnx2_COM_b06FwReleaseFix;
	fw.start_addr = bnx2_COM_b06FwStartAddr;

	fw.text_addr = bnx2_COM_b06FwTextAddr;
	fw.text_len = bnx2_COM_b06FwTextLen;
	fw.text_index = 0;

	rc = bnx2_gunzip(bp, bnx2_COM_b06FwText, sizeof(bnx2_COM_b06FwText),
			 &text, &text_len);
	if (rc)
		goto init_cpu_err;

	fw.text = text;

	fw.data_addr = bnx2_COM_b06FwDataAddr;
	fw.data_len = bnx2_COM_b06FwDataLen;
	fw.data_index = 0;
	fw.data = bnx2_COM_b06FwData;

	fw.sbss_addr = bnx2_COM_b06FwSbssAddr;
	fw.sbss_len = bnx2_COM_b06FwSbssLen;
	fw.sbss_index = 0;
	fw.sbss = bnx2_COM_b06FwSbss;

	fw.bss_addr = bnx2_COM_b06FwBssAddr;
	fw.bss_len = bnx2_COM_b06FwBssLen;
	fw.bss_index = 0;
	fw.bss = bnx2_COM_b06FwBss;

	fw.rodata_addr = bnx2_COM_b06FwRodataAddr;
	fw.rodata_len = bnx2_COM_b06FwRodataLen;
	fw.rodata_index = 0;
	fw.rodata = bnx2_COM_b06FwRodata;

	load_cpu_fw(bp, &cpu_reg, &fw);

init_cpu_err:
	/* Always tear down the zlib stream, on success and failure. */
	bnx2_gunzip_end(bp);
	return rc;
}
2533
/* Move the device between PCI power states.
 *
 * @state: PCI_D0 (full power) or PCI_D3hot (suspend); anything else
 *         returns -EINVAL.
 *
 * D0: clears the PM state bits, waits out the D3hot exit delay, and
 * disables the magic/ACPI wake-up paths in the EMAC and RPM blocks.
 * D3hot: if WOL is enabled, forces the PHY to 10/100 autoneg, arms
 * magic-packet reception and the multicast sort engine, notifies the
 * firmware, and finally writes the D3hot state (with PME enable when
 * waking on LAN).  No register access is allowed after that write.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the state field and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Disable magic-packet / ACPI wake-up in the MAC. */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate down to 10/100 for the
			 * low-power WOL link, then restore the settings. */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Sort rule 1: accept broadcast and multicast. */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the firmware we are suspending (unless the chip
		 * cannot do WOL at all). */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			/* 5706 A0/A1 only enter D3hot (state bits = 3)
			 * when WOL is armed. */
			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
2660
2661static int
2662bnx2_acquire_nvram_lock(struct bnx2 *bp)
2663{
2664 u32 val;
2665 int j;
2666
2667 /* Request access to the flash interface. */
2668 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
2669 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2670 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2671 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
2672 break;
2673
2674 udelay(5);
2675 }
2676
2677 if (j >= NVRAM_TIMEOUT_COUNT)
2678 return -EBUSY;
2679
2680 return 0;
2681}
2682
2683static int
2684bnx2_release_nvram_lock(struct bnx2 *bp)
2685{
2686 int j;
2687 u32 val;
2688
2689 /* Relinquish nvram interface. */
2690 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
2691
2692 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2693 val = REG_RD(bp, BNX2_NVM_SW_ARB);
2694 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
2695 break;
2696
2697 udelay(5);
2698 }
2699
2700 if (j >= NVRAM_TIMEOUT_COUNT)
2701 return -EBUSY;
2702
2703 return 0;
2704}
2705
2706
2707static int
2708bnx2_enable_nvram_write(struct bnx2 *bp)
2709{
2710 u32 val;
2711
2712 val = REG_RD(bp, BNX2_MISC_CFG);
2713 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
2714
2715 if (!bp->flash_info->buffered) {
2716 int j;
2717
2718 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2719 REG_WR(bp, BNX2_NVM_COMMAND,
2720 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
2721
2722 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2723 udelay(5);
2724
2725 val = REG_RD(bp, BNX2_NVM_COMMAND);
2726 if (val & BNX2_NVM_COMMAND_DONE)
2727 break;
2728 }
2729
2730 if (j >= NVRAM_TIMEOUT_COUNT)
2731 return -EBUSY;
2732 }
2733 return 0;
2734}
2735
2736static void
2737bnx2_disable_nvram_write(struct bnx2 *bp)
2738{
2739 u32 val;
2740
2741 val = REG_RD(bp, BNX2_MISC_CFG);
2742 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
2743}
2744
2745
2746static void
2747bnx2_enable_nvram_access(struct bnx2 *bp)
2748{
2749 u32 val;
2750
2751 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2752 /* Enable both bits, even on read. */
6aa20a22 2753 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
2754 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
2755}
2756
2757static void
2758bnx2_disable_nvram_access(struct bnx2 *bp)
2759{
2760 u32 val;
2761
2762 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
2763 /* Disable both bits, even after read. */
6aa20a22 2764 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
2765 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
2766 BNX2_NVM_ACCESS_ENABLE_WR_EN));
2767}
2768
2769static int
2770bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
2771{
2772 u32 cmd;
2773 int j;
2774
2775 if (bp->flash_info->buffered)
2776 /* Buffered flash, no erase needed */
2777 return 0;
2778
2779 /* Build an erase command */
2780 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
2781 BNX2_NVM_COMMAND_DOIT;
2782
2783 /* Need to clear DONE bit separately. */
2784 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2785
2786 /* Address of the NVRAM to read from. */
2787 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2788
2789 /* Issue an erase command. */
2790 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2791
2792 /* Wait for completion. */
2793 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2794 u32 val;
2795
2796 udelay(5);
2797
2798 val = REG_RD(bp, BNX2_NVM_COMMAND);
2799 if (val & BNX2_NVM_COMMAND_DONE)
2800 break;
2801 }
2802
2803 if (j >= NVRAM_TIMEOUT_COUNT)
2804 return -EBUSY;
2805
2806 return 0;
2807}
2808
2809static int
2810bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
2811{
2812 u32 cmd;
2813 int j;
2814
2815 /* Build the command word. */
2816 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
2817
2818 /* Calculate an offset of a buffered flash. */
2819 if (bp->flash_info->buffered) {
2820 offset = ((offset / bp->flash_info->page_size) <<
2821 bp->flash_info->page_bits) +
2822 (offset % bp->flash_info->page_size);
2823 }
2824
2825 /* Need to clear DONE bit separately. */
2826 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2827
2828 /* Address of the NVRAM to read from. */
2829 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2830
2831 /* Issue a read command. */
2832 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2833
2834 /* Wait for completion. */
2835 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2836 u32 val;
2837
2838 udelay(5);
2839
2840 val = REG_RD(bp, BNX2_NVM_COMMAND);
2841 if (val & BNX2_NVM_COMMAND_DONE) {
2842 val = REG_RD(bp, BNX2_NVM_READ);
2843
2844 val = be32_to_cpu(val);
2845 memcpy(ret_val, &val, 4);
2846 break;
2847 }
2848 }
2849 if (j >= NVRAM_TIMEOUT_COUNT)
2850 return -EBUSY;
2851
2852 return 0;
2853}
2854
2855
2856static int
2857bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
2858{
2859 u32 cmd, val32;
2860 int j;
2861
2862 /* Build the command word. */
2863 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
2864
2865 /* Calculate an offset of a buffered flash. */
2866 if (bp->flash_info->buffered) {
2867 offset = ((offset / bp->flash_info->page_size) <<
2868 bp->flash_info->page_bits) +
2869 (offset % bp->flash_info->page_size);
2870 }
2871
2872 /* Need to clear DONE bit separately. */
2873 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
2874
2875 memcpy(&val32, val, 4);
2876 val32 = cpu_to_be32(val32);
2877
2878 /* Write the data. */
2879 REG_WR(bp, BNX2_NVM_WRITE, val32);
2880
2881 /* Address of the NVRAM to write to. */
2882 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
2883
2884 /* Issue the write command. */
2885 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
2886
2887 /* Wait for completion. */
2888 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
2889 udelay(5);
2890
2891 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
2892 break;
2893 }
2894 if (j >= NVRAM_TIMEOUT_COUNT)
2895 return -EBUSY;
2896
2897 return 0;
2898}
2899
/* Detect the attached flash/EEPROM part and record it in bp->flash_info.
 *
 * If the NVM interface was already reconfigured by firmware (bit 30 of
 * NVM_CFG1), match the part by its config1 strapping; otherwise match
 * by raw strap pins (bit 23 selects the backup strap set) and program
 * the interface registers for the detected part.  Also determines the
 * usable flash size from shared firmware config.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or an error
 * from the NVRAM lock acquisition.
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap pin set. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the NVM size reported by firmware; fall back to the
	 * table's total size when the shared config gives none. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
2977
2978static int
2979bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
2980 int buf_size)
2981{
2982 int rc = 0;
2983 u32 cmd_flags, offset32, len32, extra;
2984
2985 if (buf_size == 0)
2986 return 0;
2987
2988 /* Request access to the flash interface. */
2989 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
2990 return rc;
2991
2992 /* Enable access to flash interface */
2993 bnx2_enable_nvram_access(bp);
2994
2995 len32 = buf_size;
2996 offset32 = offset;
2997 extra = 0;
2998
2999 cmd_flags = 0;
3000
3001 if (offset32 & 3) {
3002 u8 buf[4];
3003 u32 pre_len;
3004
3005 offset32 &= ~3;
3006 pre_len = 4 - (offset & 3);
3007
3008 if (pre_len >= len32) {
3009 pre_len = len32;
3010 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3011 BNX2_NVM_COMMAND_LAST;
3012 }
3013 else {
3014 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3015 }
3016
3017 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3018
3019 if (rc)
3020 return rc;
3021
3022 memcpy(ret_buf, buf + (offset & 3), pre_len);
3023
3024 offset32 += 4;
3025 ret_buf += pre_len;
3026 len32 -= pre_len;
3027 }
3028 if (len32 & 3) {
3029 extra = 4 - (len32 & 3);
3030 len32 = (len32 + 4) & ~3;
3031 }
3032
3033 if (len32 == 4) {
3034 u8 buf[4];
3035
3036 if (cmd_flags)
3037 cmd_flags = BNX2_NVM_COMMAND_LAST;
3038 else
3039 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3040 BNX2_NVM_COMMAND_LAST;
3041
3042 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3043
3044 memcpy(ret_buf, buf, 4 - extra);
3045 }
3046 else if (len32 > 0) {
3047 u8 buf[4];
3048
3049 /* Read the first word. */
3050 if (cmd_flags)
3051 cmd_flags = 0;
3052 else
3053 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3054
3055 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3056
3057 /* Advance to the next dword. */
3058 offset32 += 4;
3059 ret_buf += 4;
3060 len32 -= 4;
3061
3062 while (len32 > 4 && rc == 0) {
3063 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3064
3065 /* Advance to the next dword. */
3066 offset32 += 4;
3067 ret_buf += 4;
3068 len32 -= 4;
3069 }
3070
3071 if (rc)
3072 return rc;
3073
3074 cmd_flags = BNX2_NVM_COMMAND_LAST;
3075 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3076
3077 memcpy(ret_buf, buf, 4 - extra);
3078 }
3079
3080 /* Disable access to flash interface */
3081 bnx2_disable_nvram_access(bp);
3082
3083 bnx2_release_nvram_lock(bp);
3084
3085 return rc;
3086}
3087
3088static int
3089bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
3090 int buf_size)
3091{
3092 u32 written, offset32, len32;
ae181bc4 3093 u8 *buf, start[4], end[4], *flash_buffer = NULL;
b6016b76
MC
3094 int rc = 0;
3095 int align_start, align_end;
3096
3097 buf = data_buf;
3098 offset32 = offset;
3099 len32 = buf_size;
3100 align_start = align_end = 0;
3101
3102 if ((align_start = (offset32 & 3))) {
3103 offset32 &= ~3;
3104 len32 += align_start;
3105 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
3106 return rc;
3107 }
3108
3109 if (len32 & 3) {
3110 if ((len32 > 4) || !align_start) {
3111 align_end = 4 - (len32 & 3);
3112 len32 += align_end;
3113 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4,
3114 end, 4))) {
3115 return rc;
3116 }
3117 }
3118 }
3119
3120 if (align_start || align_end) {
3121 buf = kmalloc(len32, GFP_KERNEL);
3122 if (buf == 0)
3123 return -ENOMEM;
3124 if (align_start) {
3125 memcpy(buf, start, 4);
3126 }
3127 if (align_end) {
3128 memcpy(buf + len32 - 4, end, 4);
3129 }
3130 memcpy(buf + align_start, data_buf, buf_size);
3131 }
3132
ae181bc4
MC
3133 if (bp->flash_info->buffered == 0) {
3134 flash_buffer = kmalloc(264, GFP_KERNEL);
3135 if (flash_buffer == NULL) {
3136 rc = -ENOMEM;
3137 goto nvram_write_end;
3138 }
3139 }
3140
b6016b76
MC
3141 written = 0;
3142 while ((written < len32) && (rc == 0)) {
3143 u32 page_start, page_end, data_start, data_end;
3144 u32 addr, cmd_flags;
3145 int i;
b6016b76
MC
3146
3147 /* Find the page_start addr */
3148 page_start = offset32 + written;
3149 page_start -= (page_start % bp->flash_info->page_size);
3150 /* Find the page_end addr */
3151 page_end = page_start + bp->flash_info->page_size;
3152 /* Find the data_start addr */
3153 data_start = (written == 0) ? offset32 : page_start;
3154 /* Find the data_end addr */
6aa20a22 3155 data_end = (page_end > offset32 + len32) ?
b6016b76
MC
3156 (offset32 + len32) : page_end;
3157
3158 /* Request access to the flash interface. */
3159 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3160 goto nvram_write_end;
3161
3162 /* Enable access to flash interface */
3163 bnx2_enable_nvram_access(bp);
3164
3165 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3166 if (bp->flash_info->buffered == 0) {
3167 int j;
3168
3169 /* Read the whole page into the buffer
3170 * (non-buffer flash only) */
3171 for (j = 0; j < bp->flash_info->page_size; j += 4) {
3172 if (j == (bp->flash_info->page_size - 4)) {
3173 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3174 }
3175 rc = bnx2_nvram_read_dword(bp,
6aa20a22
JG
3176 page_start + j,
3177 &flash_buffer[j],
b6016b76
MC
3178 cmd_flags);
3179
3180 if (rc)
3181 goto nvram_write_end;
3182
3183 cmd_flags = 0;
3184 }
3185 }
3186
3187 /* Enable writes to flash interface (unlock write-protect) */
3188 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
3189 goto nvram_write_end;
3190
3191 /* Erase the page */
3192 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
3193 goto nvram_write_end;
3194
3195 /* Re-enable the write again for the actual write */
3196 bnx2_enable_nvram_write(bp);
3197
3198 /* Loop to write back the buffer data from page_start to
3199 * data_start */
3200 i = 0;
3201 if (bp->flash_info->buffered == 0) {
3202 for (addr = page_start; addr < data_start;
3203 addr += 4, i += 4) {
6aa20a22 3204
b6016b76
MC
3205 rc = bnx2_nvram_write_dword(bp, addr,
3206 &flash_buffer[i], cmd_flags);
3207
3208 if (rc != 0)
3209 goto nvram_write_end;
3210
3211 cmd_flags = 0;
3212 }
3213 }
3214
3215 /* Loop to write the new data from data_start to data_end */
bae25761 3216 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
b6016b76
MC
3217 if ((addr == page_end - 4) ||
3218 ((bp->flash_info->buffered) &&
3219 (addr == data_end - 4))) {
3220
3221 cmd_flags |= BNX2_NVM_COMMAND_LAST;
3222 }
3223 rc = bnx2_nvram_write_dword(bp, addr, buf,
3224 cmd_flags);
3225
3226 if (rc != 0)
3227 goto nvram_write_end;
3228
3229 cmd_flags = 0;
3230 buf += 4;
3231 }
3232
3233 /* Loop to write back the buffer data from data_end
3234 * to page_end */
3235 if (bp->flash_info->buffered == 0) {
3236 for (addr = data_end; addr < page_end;
3237 addr += 4, i += 4) {
6aa20a22 3238
b6016b76
MC
3239 if (addr == page_end-4) {
3240 cmd_flags = BNX2_NVM_COMMAND_LAST;
3241 }
3242 rc = bnx2_nvram_write_dword(bp, addr,
3243 &flash_buffer[i], cmd_flags);
3244
3245 if (rc != 0)
3246 goto nvram_write_end;
3247
3248 cmd_flags = 0;
3249 }
3250 }
3251
3252 /* Disable writes to flash interface (lock write-protect) */
3253 bnx2_disable_nvram_write(bp);
3254
3255 /* Disable access to flash interface */
3256 bnx2_disable_nvram_access(bp);
3257 bnx2_release_nvram_lock(bp);
3258
3259 /* Increment written */
3260 written += data_end - data_start;
3261 }
3262
3263nvram_write_end:
ae181bc4
MC
3264 if (bp->flash_info->buffered == 0)
3265 kfree(flash_buffer);
3266
b6016b76
MC
3267 if (align_start || align_end)
3268 kfree(buf);
3269 return rc;
3270}
3271
/* Perform a soft reset of the chip core.
 *
 * @reset_code: BNX2_DRV_MSG_CODE_* value handed to the firmware so it
 *              knows why the reset is happening.
 *
 * Quiesces DMA, handshakes with the bootcode before and after the
 * reset, deposits the driver-reset signature so firmware treats it as
 * a soft reset, verifies endian configuration afterwards, and applies
 * the 5706 A0 voltage/rbuf workarounds.
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * on bad endian config, or a firmware-sync error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
		   BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
	      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
	      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

	/* Chip reset. */
	REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

	/* 5706 A0/A1 need extra settle time before polling. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1))
		msleep(15);

	/* Reset takes approximate 30 usec */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
		if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0) {
			break;
		}
		udelay(10);
	}

	if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
		printk(KERN_ERR PFX "Chip reset did not complete\n");
		return -EBUSY;
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
	if (rc)
		return rc;

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower. The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	return rc;
}
3350
/* One-time chip initialization after reset: program DMA/PCI-X config,
 * enable the host coalescing/RX-V2P/context blocks, load the on-chip
 * CPU firmware, program MAC address, MTU, ring/coalescing parameters,
 * and tell the firmware that reset handling is complete.
 *
 * Returns 0 on success or a negative errno from firmware/CPU init.
 * NOTE(review): register write ordering below is hardware-mandated —
 * do not reorder.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* Extra DMA tuning bit for 133 MHz PCI-X. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to one in-flight DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Disable PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	bnx2_init_context(bp);

	/* Download firmware to the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	/* Kernel-bypass block size for the mailbox queue. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Tell the RV2P and TBDR blocks the host page size. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the transmit backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing thresholds (low 16 bits: normal,
	 * high 16 bits: during interrupt). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);	/* 3ms */

	/* 5706 A1 cannot use timer mode for statistics DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_COLLECT_STATS);
	else {
		REG_WR(bp, BNX2_HC_CONFIG, BNX2_HC_CONFIG_RX_TMR_MODE |
		       BNX2_HC_CONFIG_TX_TMR_MODE |
		       BNX2_HC_CONFIG_COLLECT_STATS);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_BITS_LINK_STATE);

	/* Record whether firmware-side ASF management is enabled. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	/* Handshake with firmware: reset processing is done. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	/* Cache HC_COMMAND so later COAL_NOW pokes don't need a read. */
	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
3512
3513
/* Set up the single-page TX BD ring: chain the last BD back to the
 * start, reset producer/consumer indices, and program the TX L2
 * context (type, command type, ring base address) in chip memory.
 * The write order of the context words is hardware-defined.
 */
static void
bnx2_init_tx_ring(struct bnx2 *bp)
{
	struct tx_bd *txbd;
	u32 val;

	/* Wake the queue once at least half the ring is free again. */
	bp->tx_wake_thresh = bp->tx_ring_size / 2;

	/* Last BD in the page is a chain pointer back to the ring start. */
	txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];

	txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;

	bp->tx_prod = 0;
	bp->tx_cons = 0;
	bp->hw_tx_cons = 0;
	bp->tx_prod_bseq = 0;

	val = BNX2_L2CTX_TYPE_TYPE_L2;
	val |= BNX2_L2CTX_TYPE_SIZE_L2;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TYPE, val);

	val = BNX2_L2CTX_CMD_TYPE_TYPE_L2;
	val |= 8 << 16;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_CMD_TYPE, val);

	/* Physical base address of the TX BD ring. */
	val = (u64) bp->tx_desc_mapping >> 32;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_HI, val);

	val = (u64) bp->tx_desc_mapping & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(TX_CID), BNX2_L2CTX_TBDR_BHADDR_LO, val);
}
3546
/* Set up the (possibly multi-page) RX BD ring: size the receive
 * buffers from the MTU, pre-fill every BD with length/flags, chain
 * the pages into a circle, program the RX L2 context, post fresh
 * skbs for the whole ring, and hand the producer index to the chip.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* 8 for alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + 8;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* Last BD of each page chains to the next page (the last
		 * page wraps back to page 0). */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	/* Physical base address of the first RX BD page. */
	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Post receive buffers; stop early on allocation failure. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Tell the chip how many buffers are available. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
3606
13daffa2
MC
3607static void
3608bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
3609{
3610 u32 num_rings, max;
3611
3612 bp->rx_ring_size = size;
3613 num_rings = 1;
3614 while (size > MAX_RX_DESC_CNT) {
3615 size -= MAX_RX_DESC_CNT;
3616 num_rings++;
3617 }
3618 /* round to next power of 2 */
3619 max = MAX_RX_RINGS;
3620 while ((max & num_rings) == 0)
3621 max >>= 1;
3622
3623 if (num_rings != max)
3624 max <<= 1;
3625
3626 bp->rx_max_ring = max;
3627 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
3628}
3629
b6016b76
MC
3630static void
3631bnx2_free_tx_skbs(struct bnx2 *bp)
3632{
3633 int i;
3634
3635 if (bp->tx_buf_ring == NULL)
3636 return;
3637
3638 for (i = 0; i < TX_DESC_CNT; ) {
3639 struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
3640 struct sk_buff *skb = tx_buf->skb;
3641 int j, last;
3642
3643 if (skb == NULL) {
3644 i++;
3645 continue;
3646 }
3647
3648 pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
3649 skb_headlen(skb), PCI_DMA_TODEVICE);
3650
3651 tx_buf->skb = NULL;
3652
3653 last = skb_shinfo(skb)->nr_frags;
3654 for (j = 0; j < last; j++) {
3655 tx_buf = &bp->tx_buf_ring[i + j + 1];
3656 pci_unmap_page(bp->pdev,
3657 pci_unmap_addr(tx_buf, mapping),
3658 skb_shinfo(skb)->frags[j].size,
3659 PCI_DMA_TODEVICE);
3660 }
745720e5 3661 dev_kfree_skb(skb);
b6016b76
MC
3662 i += j + 1;
3663 }
3664
3665}
3666
3667static void
3668bnx2_free_rx_skbs(struct bnx2 *bp)
3669{
3670 int i;
3671
3672 if (bp->rx_buf_ring == NULL)
3673 return;
3674
13daffa2 3675 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
3676 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
3677 struct sk_buff *skb = rx_buf->skb;
3678
05d0f1cf 3679 if (skb == NULL)
b6016b76
MC
3680 continue;
3681
3682 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
3683 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
3684
3685 rx_buf->skb = NULL;
3686
745720e5 3687 dev_kfree_skb(skb);
b6016b76
MC
3688 }
3689}
3690
/* Drop every buffer the driver holds on both rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
3697
3698static int
3699bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
3700{
3701 int rc;
3702
3703 rc = bnx2_reset_chip(bp, reset_code);
3704 bnx2_free_skbs(bp);
3705 if (rc)
3706 return rc;
3707
fba9fe91
MC
3708 if ((rc = bnx2_init_chip(bp)) != 0)
3709 return rc;
3710
b6016b76
MC
3711 bnx2_init_tx_ring(bp);
3712 bnx2_init_rx_ring(bp);
3713 return 0;
3714}
3715
3716static int
3717bnx2_init_nic(struct bnx2 *bp)
3718{
3719 int rc;
3720
3721 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
3722 return rc;
3723
80be4434 3724 spin_lock_bh(&bp->phy_lock);
b6016b76 3725 bnx2_init_phy(bp);
80be4434 3726 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3727 bnx2_set_link(bp);
3728 return 0;
3729}
3730
/* Ethtool self-test: walk a table of chip registers, checking that
 * read/write bits accept 0 and all-ones and that read-only bits are
 * unaffected by writes.  Each register is restored to its saved value
 * whether the test passes or fails.
 *
 * Returns 0 on success, -ENODEV on the first mismatch.
 * Table fields: { offset, flags, rw_mask (writable bits),
 *                 ro_mask (read-only bits) }.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i;
	static const struct {
		u16   offset;
		u16   flags;
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, 0, 0x00003f00, 0x00000000 },
		{ 0x0418, 0, 0x00000000, 0xffffffff },
		{ 0x041c, 0, 0x00000000, 0xffffffff },
		{ 0x0420, 0, 0x00000000, 0x80ffffff },
		{ 0x0424, 0, 0x00000000, 0x00000000 },
		{ 0x0428, 0, 0x00000000, 0x00000001 },
		{ 0x0450, 0, 0x00000000, 0x0000ffff },
		{ 0x0454, 0, 0x00000000, 0xffffffff },
		{ 0x0458, 0, 0x00000000, 0xffffffff },

		{ 0x0808, 0, 0x00000000, 0xffffffff },
		{ 0x0854, 0, 0x00000000, 0xffffffff },
		{ 0x0868, 0, 0x00000000, 0x77777777 },
		{ 0x086c, 0, 0x00000000, 0x77777777 },
		{ 0x0870, 0, 0x00000000, 0x77777777 },
		{ 0x0874, 0, 0x00000000, 0x77777777 },

		{ 0x0c00, 0, 0x00000000, 0x00000001 },
		{ 0x0c04, 0, 0x00000000, 0x03ff0001 },
		{ 0x0c08, 0, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, 0, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },
		{ 0x500c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		writel(0, bp->regview + offset);

		/* Writable bits must now read back as 0. */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		/* Read-only bits must be untouched by the write. */
		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(0xffffffff, bp->regview + offset);

		/* Writable bits must now read back as all ones. */
		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
3893
3894static int
3895bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
3896{
f71e1309 3897 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
3898 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
3899 int i;
3900
3901 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
3902 u32 offset;
3903
3904 for (offset = 0; offset < size; offset += 4) {
3905
3906 REG_WR_IND(bp, start + offset, test_pattern[i]);
3907
3908 if (REG_RD_IND(bp, start + offset) !=
3909 test_pattern[i]) {
3910 return -ENODEV;
3911 }
3912 }
3913 }
3914 return 0;
3915}
3916
3917static int
3918bnx2_test_memory(struct bnx2 *bp)
3919{
3920 int ret = 0;
3921 int i;
f71e1309 3922 static const struct {
b6016b76
MC
3923 u32 offset;
3924 u32 len;
3925 } mem_tbl[] = {
3926 { 0x60000, 0x4000 },
5b0c76ad 3927 { 0xa0000, 0x3000 },
b6016b76
MC
3928 { 0xe0000, 0x4000 },
3929 { 0x120000, 0x4000 },
3930 { 0x1a0000, 0x4000 },
3931 { 0x160000, 0x4000 },
3932 { 0xffffffff, 0 },
3933 };
3934
3935 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
3936 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
3937 mem_tbl[i].len)) != 0) {
3938 return ret;
3939 }
3940 }
6aa20a22 3941
b6016b76
MC
3942 return ret;
3943}
3944
bc5a0690
MC
3945#define BNX2_MAC_LOOPBACK 0
3946#define BNX2_PHY_LOOPBACK 1
3947
/* Self-test helper: place the chip in MAC or PHY loopback, transmit
 * one hand-built frame directly through the TX ring, then verify it
 * comes back on the RX ring intact (no errors, correct length, same
 * payload).  Assumes the rings were just (re)initialized by the
 * caller, so producer/consumer indices start from a known state.
 *
 * Returns 0 if the frame round-trips correctly, -EINVAL for a bad
 * mode, -ENOMEM on allocation failure, -ENODEV otherwise.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;

	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build a max-size frame: our MAC as destination, zeroed source,
	 * then a known byte ramp as payload. */
	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->mac_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
		PCI_DMA_TODEVICE);

	/* Force a status block update so we snapshot the RX consumer
	 * index before transmitting. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;

	num_pkts = 0;

	/* Post the frame as a single BD on the TX ring. */
	txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
	bp->tx_prod_bseq += pkt_size;

	REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, bp->tx_prod);
	REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);

	udelay(100);

	/* Force another status block update to pick up completions. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* The transmit must have completed... */
	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
		goto loopback_test_done;
	}

	/* ...and exactly one frame must have been received. */
	rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &bp->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	/* The L2 frame header the chip DMAs precedes the frame data. */
	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, bp->rx_offset);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length check: received length includes the 4-byte CRC. */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload byte ramp survived the round trip. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
4066
bc5a0690
MC
4067#define BNX2_MAC_LOOPBACK_FAILED 1
4068#define BNX2_PHY_LOOPBACK_FAILED 2
4069#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4070 BNX2_PHY_LOOPBACK_FAILED)
4071
4072static int
4073bnx2_test_loopback(struct bnx2 *bp)
4074{
4075 int rc = 0;
4076
4077 if (!netif_running(bp->dev))
4078 return BNX2_LOOPBACK_FAILED;
4079
4080 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4081 spin_lock_bh(&bp->phy_lock);
4082 bnx2_init_phy(bp);
4083 spin_unlock_bh(&bp->phy_lock);
4084 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4085 rc |= BNX2_MAC_LOOPBACK_FAILED;
4086 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4087 rc |= BNX2_PHY_LOOPBACK_FAILED;
4088 return rc;
4089}
4090
b6016b76
MC
4091#define NVRAM_SIZE 0x200
4092#define CRC32_RESIDUAL 0xdebb20e3
4093
4094static int
4095bnx2_test_nvram(struct bnx2 *bp)
4096{
4097 u32 buf[NVRAM_SIZE / 4];
4098 u8 *data = (u8 *) buf;
4099 int rc = 0;
4100 u32 magic, csum;
4101
4102 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4103 goto test_nvram_done;
4104
4105 magic = be32_to_cpu(buf[0]);
4106 if (magic != 0x669955aa) {
4107 rc = -ENODEV;
4108 goto test_nvram_done;
4109 }
4110
4111 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4112 goto test_nvram_done;
4113
4114 csum = ether_crc_le(0x100, data);
4115 if (csum != CRC32_RESIDUAL) {
4116 rc = -ENODEV;
4117 goto test_nvram_done;
4118 }
4119
4120 csum = ether_crc_le(0x100, data + 0x100);
4121 if (csum != CRC32_RESIDUAL) {
4122 rc = -ENODEV;
4123 }
4124
4125test_nvram_done:
4126 return rc;
4127}
4128
4129static int
4130bnx2_test_link(struct bnx2 *bp)
4131{
4132 u32 bmsr;
4133
c770a65c 4134 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4135 bnx2_read_phy(bp, MII_BMSR, &bmsr);
4136 bnx2_read_phy(bp, MII_BMSR, &bmsr);
c770a65c 4137 spin_unlock_bh(&bp->phy_lock);
6aa20a22 4138
b6016b76
MC
4139 if (bmsr & BMSR_LSTATUS) {
4140 return 0;
4141 }
4142 return -ENODEV;
4143}
4144
4145static int
4146bnx2_test_intr(struct bnx2 *bp)
4147{
4148 int i;
b6016b76
MC
4149 u16 status_idx;
4150
4151 if (!netif_running(bp->dev))
4152 return -ENODEV;
4153
4154 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4155
4156 /* This register is not touched during run-time. */
bf5295bb 4157 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
4158 REG_RD(bp, BNX2_HC_COMMAND);
4159
4160 for (i = 0; i < 10; i++) {
4161 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4162 status_idx) {
4163
4164 break;
4165 }
4166
4167 msleep_interruptible(10);
4168 }
4169 if (i < 10)
4170 return 0;
4171
4172 return -ENODEV;
4173}
4174
/* Periodic driver timer.  Sends the keep-alive pulse to the firmware,
 * mirrors the firmware RX-drop counter into the stats block, and on
 * 5706 SerDes ports runs the software parallel-detect workaround:
 * when autoneg fails but a signal is present with no config word,
 * force 1000/full; when the partner later starts sending config
 * words, re-enable autoneg.  Reschedules itself at the end.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;
	u32 msg;

	if (!netif_running(bp->dev))
		return;

	/* Skip the work (but keep rearming) while interrupts are
	 * disabled by reset/ethtool paths. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Heartbeat so the firmware knows the driver is alive. */
	msg = (u32) ++bp->fw_drv_pulse_wr_seq;
	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);

	bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {

		spin_lock(&bp->phy_lock);
		if (bp->serdes_an_pending) {
			/* Give a recent autoneg restart time to finish. */
			bp->serdes_an_pending--;
		}
		else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
			u32 bmcr;

			bp->current_interval = bp->timer_interval;

			bnx2_read_phy(bp, MII_BMCR, &bmcr);

			if (bmcr & BMCR_ANENABLE) {
				u32 phy1, phy2;

				/* Shadow register reads: 0x1c/0x7c00 gives
				 * signal detect, 0x17/0x0f01 selects the
				 * config-word status in reg 0x15 (read
				 * twice to refresh latched bits). */
				bnx2_write_phy(bp, 0x1c, 0x7c00);
				bnx2_read_phy(bp, 0x1c, &phy1);

				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);
				bnx2_write_phy(bp, 0x17, 0x0f01);
				bnx2_read_phy(bp, 0x15, &phy2);

				if ((phy1 & 0x10) &&	/* SIGNAL DETECT */
					!(phy2 & 0x20)) {	/* no CONFIG */

					/* Partner isn't autonegotiating;
					 * force 1000 Mb full duplex. */
					bmcr &= ~BMCR_ANENABLE;
					bmcr |= BMCR_SPEED1000 |
						BMCR_FULLDPLX;
					bnx2_write_phy(bp, MII_BMCR, bmcr);
					bp->phy_flags |=
						PHY_PARALLEL_DETECT_FLAG;
				}
			}
		}
		else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
			 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
			u32 phy2;

			/* Link came up via parallel detect; if the partner
			 * now sends config words, go back to autoneg. */
			bnx2_write_phy(bp, 0x17, 0x0f01);
			bnx2_read_phy(bp, 0x15, &phy2);
			if (phy2 & 0x20) {
				u32 bmcr;

				bnx2_read_phy(bp, MII_BMCR, &bmcr);
				bmcr |= BMCR_ANENABLE;
				bnx2_write_phy(bp, MII_BMCR, bmcr);

				bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

			}
		}
		else
			bp->current_interval = bp->timer_interval;

		spin_unlock(&bp->phy_lock);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
4255
/* Called with rtnl_lock */
/* net_device open handler: power up the chip, allocate ring memory,
 * request the IRQ (preferring MSI on chips that support it), bring
 * the NIC up, then verify MSI actually delivers an interrupt and
 * fall back to INTx if it does not (some chipsets break MSI).
 * Returns 0 on success or a negative errno, with everything torn
 * back down on failure.
 */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	rc = bnx2_alloc_mem(bp);
	if (rc)
		return rc;

	/* MSI is known broken on 5706 A0/A1; honor the module param. */
	if ((CHIP_ID(bp) != CHIP_ID_5706_A0) &&
		(CHIP_ID(bp) != CHIP_ID_5706_A1) &&
		!disable_msi) {

		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= USING_MSI_FLAG;
			rc = request_irq(bp->pdev->irq, bnx2_msi, 0, dev->name,
					dev);
		}
		else {
			rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
		}
	}
	else {
		rc = request_irq(bp->pdev->irq, bnx2_interrupt, IRQF_SHARED,
				dev->name, dev);
	}
	if (rc) {
		bnx2_free_mem(bp);
		return rc;
	}

	rc = bnx2_init_nic(bp);

	if (rc) {
		free_irq(bp->pdev->irq, dev);
		if (bp->flags & USING_MSI_FLAG) {
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;
		}
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
		return rc;
	}

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	bnx2_enable_int(bp);

	if (bp->flags & USING_MSI_FLAG) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			printk(KERN_WARNING PFX "%s: No interrupt was generated"
			       " using MSI, switching to INTx mode. Please"
			       " report this failure to the PCI maintainer"
			       " and include system chipset information.\n",
			       bp->dev->name);

			bnx2_disable_int(bp);
			free_irq(bp->pdev->irq, dev);
			pci_disable_msi(bp->pdev);
			bp->flags &= ~USING_MSI_FLAG;

			/* Reinitialize: the chip was partially brought up
			 * with MSI state programmed. */
			rc = bnx2_init_nic(bp);

			if (!rc) {
				rc = request_irq(bp->pdev->irq, bnx2_interrupt,
					IRQF_SHARED, dev->name, dev);
			}
			if (rc) {
				bnx2_free_skbs(bp);
				bnx2_free_mem(bp);
				del_timer_sync(&bp->timer);
				return rc;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & USING_MSI_FLAG) {
		printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
	}

	netif_start_queue(dev);

	return 0;
}
4351
4352static void
4353bnx2_reset_task(void *data)
4354{
4355 struct bnx2 *bp = data;
4356
afdc08b9
MC
4357 if (!netif_running(bp->dev))
4358 return;
4359
4360 bp->in_reset_task = 1;
b6016b76
MC
4361 bnx2_netif_stop(bp);
4362
4363 bnx2_init_nic(bp);
4364
4365 atomic_set(&bp->intr_sem, 1);
4366 bnx2_netif_start(bp);
afdc08b9 4367 bp->in_reset_task = 0;
b6016b76
MC
4368}
4369
4370static void
4371bnx2_tx_timeout(struct net_device *dev)
4372{
972ec0d4 4373 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4374
4375 /* This allows the netif to be shutdown gracefully before resetting */
4376 schedule_work(&bp->reset_task);
4377}
4378
4379#ifdef BCM_VLAN
4380/* Called with rtnl_lock */
4381static void
4382bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
4383{
972ec0d4 4384 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4385
4386 bnx2_netif_stop(bp);
4387
4388 bp->vlgrp = vlgrp;
4389 bnx2_set_rx_mode(dev);
4390
4391 bnx2_netif_start(bp);
4392}
4393
4394/* Called with rtnl_lock */
4395static void
4396bnx2_vlan_rx_kill_vid(struct net_device *dev, uint16_t vid)
4397{
972ec0d4 4398 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4399
4400 bnx2_netif_stop(bp);
4401
4402 if (bp->vlgrp)
4403 bp->vlgrp->vlan_devices[vid] = NULL;
4404 bnx2_set_rx_mode(dev);
4405
4406 bnx2_netif_start(bp);
4407}
4408#endif
4409
932ff279 4410/* Called with netif_tx_lock.
2f8af120
MC
4411 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
4412 * netif_wake_queue().
b6016b76
MC
4413 */
4414static int
4415bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
4416{
972ec0d4 4417 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4418 dma_addr_t mapping;
4419 struct tx_bd *txbd;
4420 struct sw_bd *tx_buf;
4421 u32 len, vlan_tag_flags, last_frag, mss;
4422 u16 prod, ring_prod;
4423 int i;
4424
e89bbf10 4425 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
b6016b76
MC
4426 netif_stop_queue(dev);
4427 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
4428 dev->name);
4429
4430 return NETDEV_TX_BUSY;
4431 }
4432 len = skb_headlen(skb);
4433 prod = bp->tx_prod;
4434 ring_prod = TX_RING_IDX(prod);
4435
4436 vlan_tag_flags = 0;
84fa7933 4437 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
4438 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
4439 }
4440
4441 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
4442 vlan_tag_flags |=
4443 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
4444 }
6aa20a22 4445#ifdef BCM_TSO
7967168c 4446 if ((mss = skb_shinfo(skb)->gso_size) &&
b6016b76
MC
4447 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
4448 u32 tcp_opt_len, ip_tcp_len;
4449
4450 if (skb_header_cloned(skb) &&
4451 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
4452 dev_kfree_skb(skb);
4453 return NETDEV_TX_OK;
4454 }
4455
4456 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
4457 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
4458
4459 tcp_opt_len = 0;
4460 if (skb->h.th->doff > 5) {
4461 tcp_opt_len = (skb->h.th->doff - 5) << 2;
4462 }
4463 ip_tcp_len = (skb->nh.iph->ihl << 2) + sizeof(struct tcphdr);
4464
4465 skb->nh.iph->check = 0;
d1e100ba 4466 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
b6016b76
MC
4467 skb->h.th->check =
4468 ~csum_tcpudp_magic(skb->nh.iph->saddr,
4469 skb->nh.iph->daddr,
4470 0, IPPROTO_TCP, 0);
4471
4472 if (tcp_opt_len || (skb->nh.iph->ihl > 5)) {
4473 vlan_tag_flags |= ((skb->nh.iph->ihl - 5) +
4474 (tcp_opt_len >> 2)) << 8;
4475 }
4476 }
4477 else
4478#endif
4479 {
4480 mss = 0;
4481 }
4482
4483 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6aa20a22 4484
b6016b76
MC
4485 tx_buf = &bp->tx_buf_ring[ring_prod];
4486 tx_buf->skb = skb;
4487 pci_unmap_addr_set(tx_buf, mapping, mapping);
4488
4489 txbd = &bp->tx_desc_ring[ring_prod];
4490
4491 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4492 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4493 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4494 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
4495
4496 last_frag = skb_shinfo(skb)->nr_frags;
4497
4498 for (i = 0; i < last_frag; i++) {
4499 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
4500
4501 prod = NEXT_TX_BD(prod);
4502 ring_prod = TX_RING_IDX(prod);
4503 txbd = &bp->tx_desc_ring[ring_prod];
4504
4505 len = frag->size;
4506 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
4507 len, PCI_DMA_TODEVICE);
4508 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
4509 mapping, mapping);
4510
4511 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
4512 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
4513 txbd->tx_bd_mss_nbytes = len | (mss << 16);
4514 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
4515
4516 }
4517 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
4518
4519 prod = NEXT_TX_BD(prod);
4520 bp->tx_prod_bseq += skb->len;
4521
b6016b76
MC
4522 REG_WR16(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BIDX, prod);
4523 REG_WR(bp, MB_TX_CID_ADDR + BNX2_L2CTX_TX_HOST_BSEQ, bp->tx_prod_bseq);
4524
4525 mmiowb();
4526
4527 bp->tx_prod = prod;
4528 dev->trans_start = jiffies;
4529
e89bbf10 4530 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
e89bbf10 4531 netif_stop_queue(dev);
2f8af120 4532 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
e89bbf10 4533 netif_wake_queue(dev);
b6016b76
MC
4534 }
4535
4536 return NETDEV_TX_OK;
4537}
4538
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	/* Calling flush_scheduled_work() may deadlock because
	 * linkwatch_event() may be on the workqueue and it will try to get
	 * the rtnl_lock which we are holding.
	 */
	while (bp->in_reset_task)
		msleep(1);

	/* Quiesce NAPI/interrupts before tearing the rings down. */
	bnx2_netif_stop(bp);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload code based on Wake-on-LAN capability
	 * and the user's current WoL setting.
	 */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	free_irq(bp->pdev->irq, dev);
	if (bp->flags & USING_MSI_FLAG) {
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;
	}
	/* Release ring buffers and descriptor memory after the chip is
	 * reset so no DMA can still target them.
	 */
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
4574
/* Fold a 64-bit hardware counter (split into _hi/_lo 32-bit words) into
 * an unsigned long.  The whole expansion is parenthesized so the macro
 * composes safely inside larger expressions (e.g. multiplication or a
 * following higher-precedence operator); the original expansion ended in
 * a bare `a + b` which mis-associates in such contexts.
 */
#define GET_NET_STATS64(ctr) \
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
	 (unsigned long) (ctr##_lo))

/* On 32-bit hosts only the low 32 bits fit in an unsigned long. */
#define GET_NET_STATS32(ctr) \
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS GET_NET_STATS64
#else
#define GET_NET_STATS GET_NET_STATS32
#endif
4587
/* ndo get_stats hook: translate the chip's DMA'ed statistics block into
 * the generic struct net_device_stats counters.
 */
static struct net_device_stats *
bnx2_get_stats(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct statistics_block *stats_blk = bp->stats_blk;
	struct net_device_stats *net_stats = &bp->net_stats;

	/* Stats block not allocated yet (device never opened): return the
	 * zeroed cached copy.
	 */
	if (bp->stats_blk == NULL) {
		return net_stats;
	}
	net_stats->rx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);

	net_stats->tx_packets =
		GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
		GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);

	net_stats->rx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCInOctets);

	net_stats->tx_bytes =
		GET_NET_STATS(stats_blk->stat_IfHCOutOctets);

	/* NOTE(review): multicast is reported from the OUT counter here —
	 * looks like it should be stat_IfHCInMulticastPkts; confirm against
	 * later kernel versions before changing behavior.
	 */
	net_stats->multicast =
		GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);

	net_stats->collisions =
		(unsigned long) stats_blk->stat_EtherStatsCollisions;

	net_stats->rx_length_errors =
		(unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
		stats_blk->stat_EtherStatsOverrsizePkts);

	net_stats->rx_over_errors =
		(unsigned long) stats_blk->stat_IfInMBUFDiscards;

	net_stats->rx_frame_errors =
		(unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;

	net_stats->rx_crc_errors =
		(unsigned long) stats_blk->stat_Dot3StatsFCSErrors;

	net_stats->rx_errors = net_stats->rx_length_errors +
		net_stats->rx_over_errors + net_stats->rx_frame_errors +
		net_stats->rx_crc_errors;

	net_stats->tx_aborted_errors =
		(unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
		stats_blk->stat_Dot3StatsLateCollisions);

	/* Carrier-sense counter is unreliable on 5706 and 5708 A0 (errata);
	 * report zero there.
	 */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		net_stats->tx_carrier_errors = 0;
	else {
		net_stats->tx_carrier_errors =
			(unsigned long)
			stats_blk->stat_Dot3StatsCarrierSenseErrors;
	}

	net_stats->tx_errors =
		(unsigned long)
		stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
		+
		net_stats->tx_aborted_errors +
		net_stats->tx_carrier_errors;

	/* Include frames dropped by firmware as missed. */
	net_stats->rx_missed_errors =
		(unsigned long) (stats_blk->stat_IfInMBUFDiscards +
		stats_blk->stat_FwRxDrop);

	return net_stats;
}
4663
4664/* All ethtool functions called with rtnl_lock */
4665
4666static int
4667bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
4668{
972ec0d4 4669 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4670
4671 cmd->supported = SUPPORTED_Autoneg;
4672 if (bp->phy_flags & PHY_SERDES_FLAG) {
4673 cmd->supported |= SUPPORTED_1000baseT_Full |
4674 SUPPORTED_FIBRE;
4675
4676 cmd->port = PORT_FIBRE;
4677 }
4678 else {
4679 cmd->supported |= SUPPORTED_10baseT_Half |
4680 SUPPORTED_10baseT_Full |
4681 SUPPORTED_100baseT_Half |
4682 SUPPORTED_100baseT_Full |
4683 SUPPORTED_1000baseT_Full |
4684 SUPPORTED_TP;
4685
4686 cmd->port = PORT_TP;
4687 }
4688
4689 cmd->advertising = bp->advertising;
4690
4691 if (bp->autoneg & AUTONEG_SPEED) {
4692 cmd->autoneg = AUTONEG_ENABLE;
4693 }
4694 else {
4695 cmd->autoneg = AUTONEG_DISABLE;
4696 }
4697
4698 if (netif_carrier_ok(dev)) {
4699 cmd->speed = bp->line_speed;
4700 cmd->duplex = bp->duplex;
4701 }
4702 else {
4703 cmd->speed = -1;
4704 cmd->duplex = -1;
4705 }
4706
4707 cmd->transceiver = XCVR_INTERNAL;
4708 cmd->phy_address = bp->phy_addr;
4709
4710 return 0;
4711}
6aa20a22 4712
b6016b76
MC
/* ethtool set_settings: validate the requested autoneg/speed/duplex
 * combination against the PHY type, then reprogram the PHY.  All staging
 * is done in locals so nothing in *bp changes on an -EINVAL path.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

		/* allow advertising 1 speed */
		if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
			(cmd->advertising == ADVERTISED_10baseT_Full) ||
			(cmd->advertising == ADVERTISED_100baseT_Half) ||
			(cmd->advertising == ADVERTISED_100baseT_Full)) {

			/* 10/100 modes are copper-only. */
			if (bp->phy_flags & PHY_SERDES_FLAG)
				return -EINVAL;

			advertising = cmd->advertising;

		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Full) {
			advertising = cmd->advertising;
		}
		else if (cmd->advertising == ADVERTISED_1000baseT_Half) {
			/* 1000half is not supported by this hardware. */
			return -EINVAL;
		}
		else {
			/* Anything else: advertise everything the PHY can do. */
			if (bp->phy_flags & PHY_SERDES_FLAG) {
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
			}
			else {
				advertising = ETHTOOL_ALL_COPPER_SPEED;
			}
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			/* SerDes: only 1000/full (or 2500/full when capable). */
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				return -EINVAL;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
				return -EINVAL;
		}
		else if (cmd->speed == SPEED_1000) {
			/* Copper gigabit cannot be forced; autoneg required. */
			return -EINVAL;
		}
		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	/* All checks passed: commit the staged values. */
	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	spin_lock_bh(&bp->phy_lock);

	bnx2_setup_phy(bp);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4788
/* ethtool get_drvinfo: driver name/version, PCI bus id, and a firmware
 * version string built as "X.Y.Z" from three packed bytes of bp->fw_ver.
 */
static void
bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
{
	struct bnx2 *bp = netdev_priv(dev);

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);
	strcpy(info->bus_info, pci_name(bp->pdev));
	/* Each version component is a single digit (0-9) stored in one byte
	 * of fw_ver; convert to ASCII by adding '0'.
	 */
	info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
	info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
	info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
	info->fw_version[1] = info->fw_version[3] = '.';
	info->fw_version[5] = 0;
}
4803
244ac4f4
MC
/* Size of the register dump returned by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: fixed-size dump buffer. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
4811
4812static void
4813bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
4814{
4815 u32 *p = _p, i, offset;
4816 u8 *orig_p = _p;
4817 struct bnx2 *bp = netdev_priv(dev);
4818 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
4819 0x0800, 0x0880, 0x0c00, 0x0c10,
4820 0x0c30, 0x0d08, 0x1000, 0x101c,
4821 0x1040, 0x1048, 0x1080, 0x10a4,
4822 0x1400, 0x1490, 0x1498, 0x14f0,
4823 0x1500, 0x155c, 0x1580, 0x15dc,
4824 0x1600, 0x1658, 0x1680, 0x16d8,
4825 0x1800, 0x1820, 0x1840, 0x1854,
4826 0x1880, 0x1894, 0x1900, 0x1984,
4827 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
4828 0x1c80, 0x1c94, 0x1d00, 0x1d84,
4829 0x2000, 0x2030, 0x23c0, 0x2400,
4830 0x2800, 0x2820, 0x2830, 0x2850,
4831 0x2b40, 0x2c10, 0x2fc0, 0x3058,
4832 0x3c00, 0x3c94, 0x4000, 0x4010,
4833 0x4080, 0x4090, 0x43c0, 0x4458,
4834 0x4c00, 0x4c18, 0x4c40, 0x4c54,
4835 0x4fc0, 0x5010, 0x53c0, 0x5444,
4836 0x5c00, 0x5c18, 0x5c80, 0x5c90,
4837 0x5fc0, 0x6000, 0x6400, 0x6428,
4838 0x6800, 0x6848, 0x684c, 0x6860,
4839 0x6888, 0x6910, 0x8000 };
4840
4841 regs->version = 0;
4842
4843 memset(p, 0, BNX2_REGDUMP_LEN);
4844
4845 if (!netif_running(bp->dev))
4846 return;
4847
4848 i = 0;
4849 offset = reg_boundaries[0];
4850 p += offset;
4851 while (offset < BNX2_REGDUMP_LEN) {
4852 *p++ = REG_RD(bp, offset);
4853 offset += 4;
4854 if (offset == reg_boundaries[i + 1]) {
4855 offset = reg_boundaries[i + 2];
4856 p = (u32 *) (orig_p + offset);
4857 i += 2;
4858 }
4859 }
4860}
4861
b6016b76
MC
4862static void
4863bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4864{
972ec0d4 4865 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4866
4867 if (bp->flags & NO_WOL_FLAG) {
4868 wol->supported = 0;
4869 wol->wolopts = 0;
4870 }
4871 else {
4872 wol->supported = WAKE_MAGIC;
4873 if (bp->wol)
4874 wol->wolopts = WAKE_MAGIC;
4875 else
4876 wol->wolopts = 0;
4877 }
4878 memset(&wol->sopass, 0, sizeof(wol->sopass));
4879}
4880
4881static int
4882bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
4883{
972ec0d4 4884 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4885
4886 if (wol->wolopts & ~WAKE_MAGIC)
4887 return -EINVAL;
4888
4889 if (wol->wolopts & WAKE_MAGIC) {
4890 if (bp->flags & NO_WOL_FLAG)
4891 return -EINVAL;
4892
4893 bp->wol = 1;
4894 }
4895 else {
4896 bp->wol = 0;
4897 }
4898 return 0;
4899}
4900
/* ethtool nway_reset: restart autonegotiation.  Only valid when speed
 * autoneg is enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Force a link down visible on the other side */
	if (bp->phy_flags & PHY_SERDES_FLAG) {
		bnx2_write_phy(bp, MII_BMCR, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be called
		 * with a BH spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);
		if (CHIP_NUM(bp) == CHIP_NUM_5706) {
			/* 5706 SerDes needs the driver timer to watch the
			 * autoneg attempt and apply workarounds.
			 */
			bp->current_interval = SERDES_AN_TIMEOUT;
			bp->serdes_an_pending = 1;
			mod_timer(&bp->timer, jiffies + bp->current_interval);
		}
	}

	/* Clear loopback and kick off a fresh autonegotiation. */
	bnx2_read_phy(bp, MII_BMCR, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, MII_BMCR, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
4936
4937static int
4938bnx2_get_eeprom_len(struct net_device *dev)
4939{
972ec0d4 4940 struct bnx2 *bp = netdev_priv(dev);
b6016b76 4941
1122db71 4942 if (bp->flash_info == NULL)
b6016b76
MC
4943 return 0;
4944
1122db71 4945 return (int) bp->flash_size;
b6016b76
MC
4946}
4947
4948static int
4949bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4950 u8 *eebuf)
4951{
972ec0d4 4952 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4953 int rc;
4954
1064e944 4955 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
4956
4957 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
4958
4959 return rc;
4960}
4961
4962static int
4963bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
4964 u8 *eebuf)
4965{
972ec0d4 4966 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4967 int rc;
4968
1064e944 4969 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
4970
4971 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
4972
4973 return rc;
4974}
4975
4976static int
4977bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
4978{
972ec0d4 4979 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4980
4981 memset(coal, 0, sizeof(struct ethtool_coalesce));
4982
4983 coal->rx_coalesce_usecs = bp->rx_ticks;
4984 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
4985 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
4986 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
4987
4988 coal->tx_coalesce_usecs = bp->tx_ticks;
4989 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
4990 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
4991 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
4992
4993 coal->stats_block_coalesce_usecs = bp->stats_ticks;
4994
4995 return 0;
4996}
4997
/* ethtool set_coalesce: clamp each parameter to its hardware field width
 * (tick counters are 10 bits, frame counters 8 bits) and restart the NIC
 * so the new values are programmed.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	/* Stats period: clamp to 24 bits and round down to a multiple of
	 * 256 usecs as required by the hardware.
	 */
	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
	bp->stats_ticks &= 0xffff00;

	/* Re-init the NIC so the new coalescing values take effect. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5041
5042static void
5043bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5044{
972ec0d4 5045 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5046
13daffa2 5047 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76
MC
5048 ering->rx_mini_max_pending = 0;
5049 ering->rx_jumbo_max_pending = 0;
5050
5051 ering->rx_pending = bp->rx_ring_size;
5052 ering->rx_mini_pending = 0;
5053 ering->rx_jumbo_pending = 0;
5054
5055 ering->tx_max_pending = MAX_TX_DESC_CNT;
5056 ering->tx_pending = bp->tx_ring_size;
5057}
5058
/* ethtool set_ringparam: resize the RX/TX rings.  If the device is up,
 * tear down the rings, resize, then reallocate and restart.
 */
static int
bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* TX ring must leave room for a maximally-fragmented skb. */
	if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
		(ering->tx_pending > MAX_TX_DESC_CNT) ||
		(ering->tx_pending <= MAX_SKB_FRAGS)) {

		return -EINVAL;
	}
	if (netif_running(bp->dev)) {
		/* Stop traffic and free the old rings before resizing. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, ering->rx_pending);
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(bp->dev)) {
		int rc;

		/* NOTE(review): if this allocation fails the device is left
		 * down with rings freed — caller only sees the error code.
		 */
		rc = bnx2_alloc_mem(bp);
		if (rc)
			return rc;
		bnx2_init_nic(bp);
		bnx2_netif_start(bp);
	}

	return 0;
}
5092
5093static void
5094bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5095{
972ec0d4 5096 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5097
5098 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5099 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5100 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5101}
5102
5103static int
5104bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5105{
972ec0d4 5106 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5107
5108 bp->req_flow_ctrl = 0;
5109 if (epause->rx_pause)
5110 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5111 if (epause->tx_pause)
5112 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5113
5114 if (epause->autoneg) {
5115 bp->autoneg |= AUTONEG_FLOW_CTRL;
5116 }
5117 else {
5118 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5119 }
5120
c770a65c 5121 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
5122
5123 bnx2_setup_phy(bp);
5124
c770a65c 5125 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5126
5127 return 0;
5128}
5129
5130static u32
5131bnx2_get_rx_csum(struct net_device *dev)
5132{
972ec0d4 5133 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5134
5135 return bp->rx_csum;
5136}
5137
5138static int
5139bnx2_set_rx_csum(struct net_device *dev, u32 data)
5140{
972ec0d4 5141 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5142
5143 bp->rx_csum = data;
5144 return 0;
5145}
5146
b11d6213
MC
5147static int
5148bnx2_set_tso(struct net_device *dev, u32 data)
5149{
5150 if (data)
5151 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
5152 else
5153 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
5154 return 0;
5155}
5156
/* Number of counters exported via ethtool -S; must match the sizes of
 * bnx2_stats_str_arr, bnx2_stats_offset_arr and the per-chip length
 * arrays below.
 */
#define BNX2_NUM_STATS 46

/* Names shown by ethtool -S, in the same order as bnx2_stats_offset_arr. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
5209
/* Byte offset of a field in the statistics block, expressed in 32-bit
 * words (the stats block is read as a u32 array).
 */
#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)

/* Word offset of each exported counter inside struct statistics_block,
 * in the same order as bnx2_stats_str_arr.  64-bit counters point at
 * their _hi word; the _lo word follows immediately.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
5260
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Width in bytes (8, 4, or 0 == skipped) of each counter on 5706-class
 * silicon, indexed like bnx2_stats_offset_arr.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};

/* Same table for 5708-class silicon; carrier-sense errors are valid
 * here (entry 11 is 4, not 0).
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,
};
5279
b6016b76
MC
/* Number of self tests reported via ethtool; must match
 * bnx2_tests_str_arr and the buf[] indices used in bnx2_self_test().
 */
#define BNX2_NUM_TESTS 6

/* Names shown by ethtool -t, in execution order. */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
5292
/* ethtool self_test_count: number of self-test results returned. */
static int
bnx2_self_test_count(struct net_device *dev)
{
	return BNX2_NUM_TESTS;
}
5298
/* ethtool self_test: run the online tests, plus the offline (traffic
 * disrupting) register/memory/loopback tests when requested.  buf[i] is
 * nonzero when test i failed; indices match bnx2_tests_str_arr.
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		/* Offline tests need exclusive access to the chip. */
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		/* Loopback returns a bitmask of failed loopback modes. */
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diagnostics reset. */
		if (!netif_running(bp->dev)) {
			bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		}
		else {
			bnx2_init_nic(bp);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
}
5354
5355static void
5356bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
5357{
5358 switch (stringset) {
5359 case ETH_SS_STATS:
5360 memcpy(buf, bnx2_stats_str_arr,
5361 sizeof(bnx2_stats_str_arr));
5362 break;
5363 case ETH_SS_TEST:
5364 memcpy(buf, bnx2_tests_str_arr,
5365 sizeof(bnx2_tests_str_arr));
5366 break;
5367 }
5368}
5369
/* ethtool get_stats_count: number of counters returned by -S. */
static int
bnx2_get_stats_count(struct net_device *dev)
{
	return BNX2_NUM_STATS;
}
5375
/* ethtool get_ethtool_stats: copy each exported counter out of the
 * chip's statistics block, widening to u64.  Per-chip length tables say
 * whether a counter is 8 bytes, 4 bytes, or skipped due to errata.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* Device never opened: no stats block, report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	/* 5706 (all steppings) and 5708 A0 share the errata table. */
	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: offset points at the _hi word, _lo follows. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
5416
/* ethtool phys_id: blink the port LEDs for `data` seconds (default 2)
 * so the physical port can be identified, then restore LED control.
 */
static int
bnx2_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 save;

	if (data == 0)
		data = 2;

	/* Take manual control of the LEDs; restore the mode afterwards. */
	save = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);

	/* Toggle all LEDs on/off every 500 ms for `data` seconds. */
	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0) {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
		}
		else {
			REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
				BNX2_EMAC_LED_1000MB_OVERRIDE |
				BNX2_EMAC_LED_100MB_OVERRIDE |
				BNX2_EMAC_LED_10MB_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
				BNX2_EMAC_LED_TRAFFIC);
		}
		msleep_interruptible(500);
		/* Allow the user to interrupt the blinking early. */
		if (signal_pending(current))
			break;
	}
	REG_WR(bp, BNX2_EMAC_LED, 0);
	REG_WR(bp, BNX2_MISC_CFG, save);
	return 0;
}
5450
/* ethtool operations table; registered from the probe path. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings		= bnx2_get_settings,
	.set_settings		= bnx2_set_settings,
	.get_drvinfo		= bnx2_get_drvinfo,
	.get_regs_len		= bnx2_get_regs_len,
	.get_regs		= bnx2_get_regs,
	.get_wol		= bnx2_get_wol,
	.set_wol		= bnx2_set_wol,
	.nway_reset		= bnx2_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2_get_eeprom_len,
	.get_eeprom		= bnx2_get_eeprom,
	.set_eeprom		= bnx2_set_eeprom,
	.get_coalesce		= bnx2_get_coalesce,
	.set_coalesce		= bnx2_set_coalesce,
	.get_ringparam		= bnx2_get_ringparam,
	.set_ringparam		= bnx2_set_ringparam,
	.get_pauseparam		= bnx2_get_pauseparam,
	.set_pauseparam		= bnx2_set_pauseparam,
	.get_rx_csum		= bnx2_get_rx_csum,
	.set_rx_csum		= bnx2_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_csum,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
#ifdef BCM_TSO
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2_set_tso,
#endif
	.self_test_count	= bnx2_self_test_count,
	.self_test		= bnx2_self_test,
	.get_strings		= bnx2_get_strings,
	.phys_id		= bnx2_phys_id,
	.get_stats_count	= bnx2_get_stats_count,
	.get_ethtool_stats	= bnx2_get_ethtool_stats,
	.get_perm_addr		= ethtool_op_get_perm_addr,
};
5488
/* Called with rtnl_lock */
/* ndo ioctl hook: MII register access (SIOCGMIIPHY/SIOCGMIIREG/
 * SIOCSMIIREG).  PHY accesses are serialized by phy_lock; writes
 * require CAP_NET_ADMIN.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
5530
5531/* Called with rtnl_lock */
5532static int
5533bnx2_change_mac_addr(struct net_device *dev, void *p)
5534{
5535 struct sockaddr *addr = p;
972ec0d4 5536 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5537
73eef4cd
MC
5538 if (!is_valid_ether_addr(addr->sa_data))
5539 return -EINVAL;
5540
b6016b76
MC
5541 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
5542 if (netif_running(dev))
5543 bnx2_set_mac_addr(bp);
5544
5545 return 0;
5546}
5547
5548/* Called with rtnl_lock */
5549static int
5550bnx2_change_mtu(struct net_device *dev, int new_mtu)
5551{
972ec0d4 5552 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5553
5554 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
5555 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
5556 return -EINVAL;
5557
5558 dev->mtu = new_mtu;
5559 if (netif_running(dev)) {
5560 bnx2_netif_stop(bp);
5561
5562 bnx2_init_nic(bp);
5563
5564 bnx2_netif_start(bp);
5565 }
5566 return 0;
5567}
5568
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: run the interrupt handler with the IRQ masked so
 * netconsole/kgdb can drain packets without interrupt delivery.
 */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
5580
5581static int __devinit
5582bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
5583{
5584 struct bnx2 *bp;
5585 unsigned long mem_len;
5586 int rc;
5587 u32 reg;
5588
5589 SET_MODULE_OWNER(dev);
5590 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 5591 bp = netdev_priv(dev);
b6016b76
MC
5592
5593 bp->flags = 0;
5594 bp->phy_flags = 0;
5595
5596 /* enable device (incl. PCI PM wakeup), and bus-mastering */
5597 rc = pci_enable_device(pdev);
5598 if (rc) {
9b91cf9d 5599 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
b6016b76
MC
5600 goto err_out;
5601 }
5602
5603 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 5604 dev_err(&pdev->dev,
2e8a538d 5605 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
5606 rc = -ENODEV;
5607 goto err_out_disable;
5608 }
5609
5610 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
5611 if (rc) {
9b91cf9d 5612 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
5613 goto err_out_disable;
5614 }
5615
5616 pci_set_master(pdev);
5617
5618 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
5619 if (bp->pm_cap == 0) {
9b91cf9d 5620 dev_err(&pdev->dev,
2e8a538d 5621 "Cannot find power management capability, aborting.\n");
b6016b76
MC
5622 rc = -EIO;
5623 goto err_out_release;
5624 }
5625
5626 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
5627 if (bp->pcix_cap == 0) {
9b91cf9d 5628 dev_err(&pdev->dev, "Cannot find PCIX capability, aborting.\n");
b6016b76
MC
5629 rc = -EIO;
5630 goto err_out_release;
5631 }
5632
5633 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
5634 bp->flags |= USING_DAC_FLAG;
5635 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
9b91cf9d 5636 dev_err(&pdev->dev,
2e8a538d 5637 "pci_set_consistent_dma_mask failed, aborting.\n");
b6016b76
MC
5638 rc = -EIO;
5639 goto err_out_release;
5640 }
5641 }
5642 else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
9b91cf9d 5643 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
b6016b76
MC
5644 rc = -EIO;
5645 goto err_out_release;
5646 }
5647
5648 bp->dev = dev;
5649 bp->pdev = pdev;
5650
5651 spin_lock_init(&bp->phy_lock);
b6016b76
MC
5652 INIT_WORK(&bp->reset_task, bnx2_reset_task, bp);
5653
5654 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
5655 mem_len = MB_GET_CID_ADDR(17);
5656 dev->mem_end = dev->mem_start + mem_len;
5657 dev->irq = pdev->irq;
5658
5659 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
5660
5661 if (!bp->regview) {
9b91cf9d 5662 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
5663 rc = -ENOMEM;
5664 goto err_out_release;
5665 }
5666
5667 /* Configure byte swap and enable write to the reg_window registers.
5668 * Rely on CPU to do target byte swapping on big endian systems
5669 * The chip's target access swapping will not swap all accesses
5670 */
5671 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
5672 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
5673 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
5674
829ca9a3 5675 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5676
5677 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
5678
b6016b76
MC
5679 /* Get bus information. */
5680 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
5681 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
5682 u32 clkreg;
5683
5684 bp->flags |= PCIX_FLAG;
5685
5686 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6aa20a22 5687
b6016b76
MC
5688 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
5689 switch (clkreg) {
5690 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
5691 bp->bus_speed_mhz = 133;
5692 break;
5693
5694 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
5695 bp->bus_speed_mhz = 100;
5696 break;
5697
5698 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
5699 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
5700 bp->bus_speed_mhz = 66;
5701 break;
5702
5703 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
5704 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
5705 bp->bus_speed_mhz = 50;
5706 break;
5707
5708 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
5709 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
5710 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
5711 bp->bus_speed_mhz = 33;
5712 break;
5713 }
5714 }
5715 else {
5716 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
5717 bp->bus_speed_mhz = 66;
5718 else
5719 bp->bus_speed_mhz = 33;
5720 }
5721
5722 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
5723 bp->flags |= PCI_32BIT_FLAG;
5724
5725 /* 5706A0 may falsely detect SERR and PERR. */
5726 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5727 reg = REG_RD(bp, PCI_COMMAND);
5728 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
5729 REG_WR(bp, PCI_COMMAND, reg);
5730 }
5731 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
5732 !(bp->flags & PCIX_FLAG)) {
5733
9b91cf9d 5734 dev_err(&pdev->dev,
2e8a538d 5735 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
5736 goto err_out_unmap;
5737 }
5738
5739 bnx2_init_nvram(bp);
5740
e3648b3d
MC
5741 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
5742
5743 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
5744 BNX2_SHM_HDR_SIGNATURE_SIG)
5745 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0);
5746 else
5747 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
5748
b6016b76
MC
5749 /* Get the permanent MAC address. First we need to make sure the
5750 * firmware is actually running.
5751 */
e3648b3d 5752 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
5753
5754 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
5755 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 5756 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
5757 rc = -ENODEV;
5758 goto err_out_unmap;
5759 }
5760
e3648b3d 5761 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
b6016b76 5762
e3648b3d 5763 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
5764 bp->mac_addr[0] = (u8) (reg >> 8);
5765 bp->mac_addr[1] = (u8) reg;
5766
e3648b3d 5767 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
5768 bp->mac_addr[2] = (u8) (reg >> 24);
5769 bp->mac_addr[3] = (u8) (reg >> 16);
5770 bp->mac_addr[4] = (u8) (reg >> 8);
5771 bp->mac_addr[5] = (u8) reg;
5772
5773 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 5774 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
5775
5776 bp->rx_csum = 1;
5777
5778 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
5779
5780 bp->tx_quick_cons_trip_int = 20;
5781 bp->tx_quick_cons_trip = 20;
5782 bp->tx_ticks_int = 80;
5783 bp->tx_ticks = 80;
6aa20a22 5784
b6016b76
MC
5785 bp->rx_quick_cons_trip_int = 6;
5786 bp->rx_quick_cons_trip = 6;
5787 bp->rx_ticks_int = 18;
5788 bp->rx_ticks = 18;
5789
5790 bp->stats_ticks = 1000000 & 0xffff00;
5791
5792 bp->timer_interval = HZ;
cd339a0e 5793 bp->current_interval = HZ;
b6016b76 5794
5b0c76ad
MC
5795 bp->phy_addr = 1;
5796
b6016b76
MC
5797 /* Disable WOL support if we are running on a SERDES chip. */
5798 if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT) {
5799 bp->phy_flags |= PHY_SERDES_FLAG;
5800 bp->flags |= NO_WOL_FLAG;
5b0c76ad
MC
5801 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5802 bp->phy_addr = 2;
e3648b3d 5803 reg = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
5804 BNX2_SHARED_HW_CFG_CONFIG);
5805 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
5806 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
5807 }
b6016b76
MC
5808 }
5809
16088272
MC
5810 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
5811 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5812 (CHIP_ID(bp) == CHIP_ID_5708_B1))
dda1e390
MC
5813 bp->flags |= NO_WOL_FLAG;
5814
b6016b76
MC
5815 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
5816 bp->tx_quick_cons_trip_int =
5817 bp->tx_quick_cons_trip;
5818 bp->tx_ticks_int = bp->tx_ticks;
5819 bp->rx_quick_cons_trip_int =
5820 bp->rx_quick_cons_trip;
5821 bp->rx_ticks_int = bp->rx_ticks;
5822 bp->comp_prod_trip_int = bp->comp_prod_trip;
5823 bp->com_ticks_int = bp->com_ticks;
5824 bp->cmd_ticks_int = bp->cmd_ticks;
5825 }
5826
f9317a40
MC
5827 /* Disable MSI on 5706 if AMD 8132 bridge is found.
5828 *
5829 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
5830 * with byte enables disabled on the unused 32-bit word. This is legal
5831 * but causes problems on the AMD 8132 which will eventually stop
5832 * responding after a while.
5833 *
5834 * AMD believes this incompatibility is unique to the 5706, and
5835 * prefers to locally disable MSI rather than globally disabling it
5836 * using pci_msi_quirk.
5837 */
5838 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
5839 struct pci_dev *amd_8132 = NULL;
5840
5841 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
5842 PCI_DEVICE_ID_AMD_8132_BRIDGE,
5843 amd_8132))) {
5844 u8 rev;
5845
5846 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
5847 if (rev >= 0x10 && rev <= 0x13) {
5848 disable_msi = 1;
5849 pci_dev_put(amd_8132);
5850 break;
5851 }
5852 }
5853 }
5854
b6016b76
MC
5855 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
5856 bp->req_line_speed = 0;
5857 if (bp->phy_flags & PHY_SERDES_FLAG) {
5858 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
cd339a0e 5859
e3648b3d 5860 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
cd339a0e
MC
5861 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
5862 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
5863 bp->autoneg = 0;
5864 bp->req_line_speed = bp->line_speed = SPEED_1000;
5865 bp->req_duplex = DUPLEX_FULL;
5866 }
b6016b76
MC
5867 }
5868 else {
5869 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
5870 }
5871
5872 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
5873
cd339a0e
MC
5874 init_timer(&bp->timer);
5875 bp->timer.expires = RUN_AT(bp->timer_interval);
5876 bp->timer.data = (unsigned long) bp;
5877 bp->timer.function = bnx2_timer;
5878
b6016b76
MC
5879 return 0;
5880
5881err_out_unmap:
5882 if (bp->regview) {
5883 iounmap(bp->regview);
73eef4cd 5884 bp->regview = NULL;
b6016b76
MC
5885 }
5886
5887err_out_release:
5888 pci_release_regions(pdev);
5889
5890err_out_disable:
5891 pci_disable_device(pdev);
5892 pci_set_drvdata(pdev, NULL);
5893
5894err_out:
5895 return rc;
5896}
5897
5898static int __devinit
5899bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
5900{
5901 static int version_printed = 0;
5902 struct net_device *dev = NULL;
5903 struct bnx2 *bp;
5904 int rc, i;
5905
5906 if (version_printed++ == 0)
5907 printk(KERN_INFO "%s", version);
5908
5909 /* dev zeroed in init_etherdev */
5910 dev = alloc_etherdev(sizeof(*bp));
5911
5912 if (!dev)
5913 return -ENOMEM;
5914
5915 rc = bnx2_init_board(pdev, dev);
5916 if (rc < 0) {
5917 free_netdev(dev);
5918 return rc;
5919 }
5920
5921 dev->open = bnx2_open;
5922 dev->hard_start_xmit = bnx2_start_xmit;
5923 dev->stop = bnx2_close;
5924 dev->get_stats = bnx2_get_stats;
5925 dev->set_multicast_list = bnx2_set_rx_mode;
5926 dev->do_ioctl = bnx2_ioctl;
5927 dev->set_mac_address = bnx2_change_mac_addr;
5928 dev->change_mtu = bnx2_change_mtu;
5929 dev->tx_timeout = bnx2_tx_timeout;
5930 dev->watchdog_timeo = TX_TIMEOUT;
5931#ifdef BCM_VLAN
5932 dev->vlan_rx_register = bnx2_vlan_rx_register;
5933 dev->vlan_rx_kill_vid = bnx2_vlan_rx_kill_vid;
5934#endif
5935 dev->poll = bnx2_poll;
5936 dev->ethtool_ops = &bnx2_ethtool_ops;
5937 dev->weight = 64;
5938
972ec0d4 5939 bp = netdev_priv(dev);
b6016b76
MC
5940
5941#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
5942 dev->poll_controller = poll_bnx2;
5943#endif
5944
5945 if ((rc = register_netdev(dev))) {
9b91cf9d 5946 dev_err(&pdev->dev, "Cannot register net device\n");
b6016b76
MC
5947 if (bp->regview)
5948 iounmap(bp->regview);
5949 pci_release_regions(pdev);
5950 pci_disable_device(pdev);
5951 pci_set_drvdata(pdev, NULL);
5952 free_netdev(dev);
5953 return rc;
5954 }
5955
5956 pci_set_drvdata(pdev, dev);
5957
5958 memcpy(dev->dev_addr, bp->mac_addr, 6);
24b8e05d 5959 memcpy(dev->perm_addr, bp->mac_addr, 6);
b6016b76
MC
5960 bp->name = board_info[ent->driver_data].name,
5961 printk(KERN_INFO "%s: %s (%c%d) PCI%s %s %dMHz found at mem %lx, "
5962 "IRQ %d, ",
5963 dev->name,
5964 bp->name,
5965 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
5966 ((CHIP_ID(bp) & 0x0ff0) >> 4),
5967 ((bp->flags & PCIX_FLAG) ? "-X" : ""),
5968 ((bp->flags & PCI_32BIT_FLAG) ? "32-bit" : "64-bit"),
5969 bp->bus_speed_mhz,
5970 dev->base_addr,
5971 bp->pdev->irq);
5972
5973 printk("node addr ");
5974 for (i = 0; i < 6; i++)
5975 printk("%2.2x", dev->dev_addr[i]);
5976 printk("\n");
5977
5978 dev->features |= NETIF_F_SG;
5979 if (bp->flags & USING_DAC_FLAG)
5980 dev->features |= NETIF_F_HIGHDMA;
5981 dev->features |= NETIF_F_IP_CSUM;
5982#ifdef BCM_VLAN
5983 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
5984#endif
5985#ifdef BCM_TSO
b11d6213 5986 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
b6016b76
MC
5987#endif
5988
5989 netif_carrier_off(bp->dev);
5990
5991 return 0;
5992}
5993
/* PCI remove entry point: tear down in the reverse order of probe.
 * The ordering here matters: pending work must finish before the netdev
 * is unregistered, and the netdev must be unregistered before the
 * register mapping and the netdev itself are freed.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Wait for a possibly pending bnx2_reset_task to complete. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6012
/* PCI suspend handler: quiesce the interface, tell the firmware why we
 * are going down (link-down, WOL or no-WOL), free the rings, and drop
 * into the PM state chosen for 'state'.  A closed interface needs no work.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);
	u32 reset_code;

	if (!netif_running(dev))
		return 0;

	/* Finish any pending reset_task before stopping the device. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	/* Pick the firmware unload code that matches the WOL capability. */
	if (bp->flags & NO_WOL_FLAG)
		reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
	else if (bp->wol)
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
	else
		reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
	bnx2_reset_chip(bp, reset_code);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
6038
/* PCI resume handler: restore full power and re-initialize the NIC if
 * the interface was running when we suspended.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init: suspend reset the chip and freed all rings. */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6054
/* PCI driver glue: binds the probe/remove/PM callbacks above to the
 * device IDs in bnx2_pci_tbl.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6063
6064static int __init bnx2_init(void)
6065{
29917620 6066 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
6067}
6068
/* Module exit point: unbinds every device (via bnx2_remove_one) and
 * unregisters the driver from the PCI core.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
6073
/* Wire the init/exit functions above into the module load/unload path. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);