]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2.c
[BNX2]: Add ethtool support for remote PHY.
[net-next-2.6.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
72fbaeb6 3 * Copyright (c) 2004-2007 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
29#include <asm/bitops.h>
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
38#ifdef NETIF_F_HW_VLAN_TX
39#include <linux/if_vlan.h>
40#define BCM_VLAN 1
41#endif
f2a4f052
MC
42#include <net/ip.h>
43#include <net/tcp.h>
44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
f2a4f052 50
b6016b76
MC
51#include "bnx2.h"
52#include "bnx2_fw.h"
d43584c8 53#include "bnx2_fw2.h"
b6016b76
MC
54
55#define DRV_MODULE_NAME "bnx2"
56#define PFX DRV_MODULE_NAME ": "
b91b9fd1
MC
57#define DRV_MODULE_VERSION "1.5.11"
58#define DRV_MODULE_RELDATE "June 4, 2007"
b6016b76
MC
59
60#define RUN_AT(x) (jiffies + (x))
61
62/* Time in jiffies before concluding the transmitter is hung. */
63#define TX_TIMEOUT (5*HZ)
64
e19360f2 65static const char version[] __devinitdata =
b6016b76
MC
66 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
67
68MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
05d0f1cf 69MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708 Driver");
b6016b76
MC
70MODULE_LICENSE("GPL");
71MODULE_VERSION(DRV_MODULE_VERSION);
72
73static int disable_msi = 0;
74
75module_param(disable_msi, int, 0);
76MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
77
78typedef enum {
79 BCM5706 = 0,
80 NC370T,
81 NC370I,
82 BCM5706S,
83 NC370F,
5b0c76ad
MC
84 BCM5708,
85 BCM5708S,
bac0dff6 86 BCM5709,
27a005b8 87 BCM5709S,
b6016b76
MC
88} board_t;
89
90/* indexed by board_t, above */
f71e1309 91static const struct {
b6016b76
MC
92 char *name;
93} board_info[] __devinitdata = {
94 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
95 { "HP NC370T Multifunction Gigabit Server Adapter" },
96 { "HP NC370i Multifunction Gigabit Server Adapter" },
97 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
98 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
99 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
100 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 101 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 102 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
b6016b76
MC
103 };
104
105static struct pci_device_id bnx2_pci_tbl[] = {
106 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
107 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
108 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
109 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
110 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
111 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
112 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
113 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
114 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
115 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
116 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
117 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
118 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
119 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
120 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
121 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
122 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
123 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
b6016b76
MC
124 { 0, }
125};
126
127static struct flash_spec flash_table[] =
128{
129 /* Slow EEPROM */
37137709 130 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
b6016b76
MC
131 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
132 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
133 "EEPROM - slow"},
37137709
MC
134 /* Expansion entry 0001 */
135 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76 136 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
137 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
138 "Entry 0001"},
b6016b76
MC
139 /* Saifun SA25F010 (non-buffered flash) */
140 /* strap, cfg1, & write1 need updates */
37137709 141 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
142 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
143 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
144 "Non-buffered flash (128kB)"},
145 /* Saifun SA25F020 (non-buffered flash) */
146 /* strap, cfg1, & write1 need updates */
37137709 147 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
b6016b76
MC
148 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
149 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
150 "Non-buffered flash (256kB)"},
37137709
MC
151 /* Expansion entry 0100 */
152 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
153 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
154 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
155 "Entry 0100"},
156 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 157 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
37137709
MC
158 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
159 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
160 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
161 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
162 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
163 0, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
164 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
165 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
166 /* Saifun SA25F005 (non-buffered flash) */
167 /* strap, cfg1, & write1 need updates */
168 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
169 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
170 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
171 "Non-buffered flash (64kB)"},
172 /* Fast EEPROM */
173 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
174 1, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
175 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
176 "EEPROM - fast"},
177 /* Expansion entry 1001 */
178 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
179 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
180 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
181 "Entry 1001"},
182 /* Expansion entry 1010 */
183 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
184 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
185 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
186 "Entry 1010"},
187 /* ATMEL AT45DB011B (buffered flash) */
188 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
189 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
190 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
191 "Buffered flash (128kB)"},
192 /* Expansion entry 1100 */
193 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
194 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
195 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
196 "Entry 1100"},
197 /* Expansion entry 1101 */
198 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
199 0, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
200 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
201 "Entry 1101"},
202 /* Ateml Expansion entry 1110 */
203 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
204 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
205 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
206 "Entry 1110 (Atmel)"},
207 /* ATMEL AT45DB021B (buffered flash) */
208 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
209 1, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
210 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
211 "Buffered flash (256kB)"},
b6016b76
MC
212};
213
214MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
215
e89bbf10
MC
216static inline u32 bnx2_tx_avail(struct bnx2 *bp)
217{
2f8af120 218 u32 diff;
e89bbf10 219
2f8af120 220 smp_mb();
faac9c4b
MC
221
222 /* The ring uses 256 indices for 255 entries, one of them
223 * needs to be skipped.
224 */
225 diff = bp->tx_prod - bp->tx_cons;
226 if (unlikely(diff >= TX_DESC_CNT)) {
227 diff &= 0xffff;
228 if (diff == TX_DESC_CNT)
229 diff = MAX_TX_DESC_CNT;
230 }
e89bbf10
MC
231 return (bp->tx_ring_size - diff);
232}
233
b6016b76
MC
234static u32
235bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
236{
1b8227c4
MC
237 u32 val;
238
239 spin_lock_bh(&bp->indirect_lock);
b6016b76 240 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
241 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
242 spin_unlock_bh(&bp->indirect_lock);
243 return val;
b6016b76
MC
244}
245
246static void
247bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
248{
1b8227c4 249 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
250 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
251 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 252 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
253}
254
255static void
256bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
257{
258 offset += cid_addr;
1b8227c4 259 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
260 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
261 int i;
262
263 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
264 REG_WR(bp, BNX2_CTX_CTX_CTRL,
265 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
266 for (i = 0; i < 5; i++) {
267 u32 val;
268 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
269 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
270 break;
271 udelay(5);
272 }
273 } else {
274 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
275 REG_WR(bp, BNX2_CTX_DATA, val);
276 }
1b8227c4 277 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
278}
279
280static int
281bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
282{
283 u32 val1;
284 int i, ret;
285
286 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
287 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
288 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
289
290 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
291 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
292
293 udelay(40);
294 }
295
296 val1 = (bp->phy_addr << 21) | (reg << 16) |
297 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
298 BNX2_EMAC_MDIO_COMM_START_BUSY;
299 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
300
301 for (i = 0; i < 50; i++) {
302 udelay(10);
303
304 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
305 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
306 udelay(5);
307
308 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
309 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
310
311 break;
312 }
313 }
314
315 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
316 *val = 0x0;
317 ret = -EBUSY;
318 }
319 else {
320 *val = val1;
321 ret = 0;
322 }
323
324 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
325 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
326 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
327
328 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
329 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
330
331 udelay(40);
332 }
333
334 return ret;
335}
336
337static int
338bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
339{
340 u32 val1;
341 int i, ret;
342
343 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
344 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
345 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
346
347 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
348 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
349
350 udelay(40);
351 }
352
353 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
354 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
355 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
356 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 357
b6016b76
MC
358 for (i = 0; i < 50; i++) {
359 udelay(10);
360
361 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
362 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
363 udelay(5);
364 break;
365 }
366 }
367
368 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
369 ret = -EBUSY;
370 else
371 ret = 0;
372
373 if (bp->phy_flags & PHY_INT_MODE_AUTO_POLLING_FLAG) {
374 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
375 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
376
377 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
378 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
379
380 udelay(40);
381 }
382
383 return ret;
384}
385
386static void
387bnx2_disable_int(struct bnx2 *bp)
388{
389 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
390 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
391 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
392}
393
394static void
395bnx2_enable_int(struct bnx2 *bp)
396{
1269a8a6
MC
397 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
398 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
399 BNX2_PCICFG_INT_ACK_CMD_MASK_INT | bp->last_status_idx);
400
b6016b76
MC
401 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
402 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID | bp->last_status_idx);
403
bf5295bb 404 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
405}
406
407static void
408bnx2_disable_int_sync(struct bnx2 *bp)
409{
410 atomic_inc(&bp->intr_sem);
411 bnx2_disable_int(bp);
412 synchronize_irq(bp->pdev->irq);
413}
414
415static void
416bnx2_netif_stop(struct bnx2 *bp)
417{
418 bnx2_disable_int_sync(bp);
419 if (netif_running(bp->dev)) {
420 netif_poll_disable(bp->dev);
421 netif_tx_disable(bp->dev);
422 bp->dev->trans_start = jiffies; /* prevent tx timeout */
423 }
424}
425
426static void
427bnx2_netif_start(struct bnx2 *bp)
428{
429 if (atomic_dec_and_test(&bp->intr_sem)) {
430 if (netif_running(bp->dev)) {
431 netif_wake_queue(bp->dev);
432 netif_poll_enable(bp->dev);
433 bnx2_enable_int(bp);
434 }
435 }
436}
437
438static void
439bnx2_free_mem(struct bnx2 *bp)
440{
13daffa2
MC
441 int i;
442
59b47d8a
MC
443 for (i = 0; i < bp->ctx_pages; i++) {
444 if (bp->ctx_blk[i]) {
445 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
446 bp->ctx_blk[i],
447 bp->ctx_blk_mapping[i]);
448 bp->ctx_blk[i] = NULL;
449 }
450 }
b6016b76 451 if (bp->status_blk) {
0f31f994 452 pci_free_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
453 bp->status_blk, bp->status_blk_mapping);
454 bp->status_blk = NULL;
0f31f994 455 bp->stats_blk = NULL;
b6016b76
MC
456 }
457 if (bp->tx_desc_ring) {
458 pci_free_consistent(bp->pdev,
459 sizeof(struct tx_bd) * TX_DESC_CNT,
460 bp->tx_desc_ring, bp->tx_desc_mapping);
461 bp->tx_desc_ring = NULL;
462 }
b4558ea9
JJ
463 kfree(bp->tx_buf_ring);
464 bp->tx_buf_ring = NULL;
13daffa2
MC
465 for (i = 0; i < bp->rx_max_ring; i++) {
466 if (bp->rx_desc_ring[i])
467 pci_free_consistent(bp->pdev,
468 sizeof(struct rx_bd) * RX_DESC_CNT,
469 bp->rx_desc_ring[i],
470 bp->rx_desc_mapping[i]);
471 bp->rx_desc_ring[i] = NULL;
472 }
473 vfree(bp->rx_buf_ring);
b4558ea9 474 bp->rx_buf_ring = NULL;
b6016b76
MC
475}
476
477static int
478bnx2_alloc_mem(struct bnx2 *bp)
479{
0f31f994 480 int i, status_blk_size;
13daffa2 481
0f31f994
MC
482 bp->tx_buf_ring = kzalloc(sizeof(struct sw_bd) * TX_DESC_CNT,
483 GFP_KERNEL);
b6016b76
MC
484 if (bp->tx_buf_ring == NULL)
485 return -ENOMEM;
486
b6016b76
MC
487 bp->tx_desc_ring = pci_alloc_consistent(bp->pdev,
488 sizeof(struct tx_bd) *
489 TX_DESC_CNT,
490 &bp->tx_desc_mapping);
491 if (bp->tx_desc_ring == NULL)
492 goto alloc_mem_err;
493
13daffa2
MC
494 bp->rx_buf_ring = vmalloc(sizeof(struct sw_bd) * RX_DESC_CNT *
495 bp->rx_max_ring);
b6016b76
MC
496 if (bp->rx_buf_ring == NULL)
497 goto alloc_mem_err;
498
13daffa2
MC
499 memset(bp->rx_buf_ring, 0, sizeof(struct sw_bd) * RX_DESC_CNT *
500 bp->rx_max_ring);
501
502 for (i = 0; i < bp->rx_max_ring; i++) {
503 bp->rx_desc_ring[i] =
504 pci_alloc_consistent(bp->pdev,
505 sizeof(struct rx_bd) * RX_DESC_CNT,
506 &bp->rx_desc_mapping[i]);
507 if (bp->rx_desc_ring[i] == NULL)
508 goto alloc_mem_err;
509
510 }
b6016b76 511
0f31f994
MC
512 /* Combine status and statistics blocks into one allocation. */
513 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
514 bp->status_stats_size = status_blk_size +
515 sizeof(struct statistics_block);
516
517 bp->status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
b6016b76
MC
518 &bp->status_blk_mapping);
519 if (bp->status_blk == NULL)
520 goto alloc_mem_err;
521
0f31f994 522 memset(bp->status_blk, 0, bp->status_stats_size);
b6016b76 523
0f31f994
MC
524 bp->stats_blk = (void *) ((unsigned long) bp->status_blk +
525 status_blk_size);
b6016b76 526
0f31f994 527 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 528
59b47d8a
MC
529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
530 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
531 if (bp->ctx_pages == 0)
532 bp->ctx_pages = 1;
533 for (i = 0; i < bp->ctx_pages; i++) {
534 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
535 BCM_PAGE_SIZE,
536 &bp->ctx_blk_mapping[i]);
537 if (bp->ctx_blk[i] == NULL)
538 goto alloc_mem_err;
539 }
540 }
b6016b76
MC
541 return 0;
542
543alloc_mem_err:
544 bnx2_free_mem(bp);
545 return -ENOMEM;
546}
547
e3648b3d
MC
548static void
549bnx2_report_fw_link(struct bnx2 *bp)
550{
551 u32 fw_link_status = 0;
552
0d8a6571
MC
553 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
554 return;
555
e3648b3d
MC
556 if (bp->link_up) {
557 u32 bmsr;
558
559 switch (bp->line_speed) {
560 case SPEED_10:
561 if (bp->duplex == DUPLEX_HALF)
562 fw_link_status = BNX2_LINK_STATUS_10HALF;
563 else
564 fw_link_status = BNX2_LINK_STATUS_10FULL;
565 break;
566 case SPEED_100:
567 if (bp->duplex == DUPLEX_HALF)
568 fw_link_status = BNX2_LINK_STATUS_100HALF;
569 else
570 fw_link_status = BNX2_LINK_STATUS_100FULL;
571 break;
572 case SPEED_1000:
573 if (bp->duplex == DUPLEX_HALF)
574 fw_link_status = BNX2_LINK_STATUS_1000HALF;
575 else
576 fw_link_status = BNX2_LINK_STATUS_1000FULL;
577 break;
578 case SPEED_2500:
579 if (bp->duplex == DUPLEX_HALF)
580 fw_link_status = BNX2_LINK_STATUS_2500HALF;
581 else
582 fw_link_status = BNX2_LINK_STATUS_2500FULL;
583 break;
584 }
585
586 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
587
588 if (bp->autoneg) {
589 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
590
ca58c3af
MC
591 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
592 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
593
594 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
595 bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)
596 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
597 else
598 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
599 }
600 }
601 else
602 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
603
604 REG_WR_IND(bp, bp->shmem_base + BNX2_LINK_STATUS, fw_link_status);
605}
606
b6016b76
MC
607static void
608bnx2_report_link(struct bnx2 *bp)
609{
610 if (bp->link_up) {
611 netif_carrier_on(bp->dev);
612 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
613
614 printk("%d Mbps ", bp->line_speed);
615
616 if (bp->duplex == DUPLEX_FULL)
617 printk("full duplex");
618 else
619 printk("half duplex");
620
621 if (bp->flow_ctrl) {
622 if (bp->flow_ctrl & FLOW_CTRL_RX) {
623 printk(", receive ");
624 if (bp->flow_ctrl & FLOW_CTRL_TX)
625 printk("& transmit ");
626 }
627 else {
628 printk(", transmit ");
629 }
630 printk("flow control ON");
631 }
632 printk("\n");
633 }
634 else {
635 netif_carrier_off(bp->dev);
636 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
637 }
e3648b3d
MC
638
639 bnx2_report_fw_link(bp);
b6016b76
MC
640}
641
642static void
643bnx2_resolve_flow_ctrl(struct bnx2 *bp)
644{
645 u32 local_adv, remote_adv;
646
647 bp->flow_ctrl = 0;
6aa20a22 648 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
649 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
650
651 if (bp->duplex == DUPLEX_FULL) {
652 bp->flow_ctrl = bp->req_flow_ctrl;
653 }
654 return;
655 }
656
657 if (bp->duplex != DUPLEX_FULL) {
658 return;
659 }
660
5b0c76ad
MC
661 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
662 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
663 u32 val;
664
665 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
666 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
667 bp->flow_ctrl |= FLOW_CTRL_TX;
668 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
669 bp->flow_ctrl |= FLOW_CTRL_RX;
670 return;
671 }
672
ca58c3af
MC
673 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
674 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
675
676 if (bp->phy_flags & PHY_SERDES_FLAG) {
677 u32 new_local_adv = 0;
678 u32 new_remote_adv = 0;
679
680 if (local_adv & ADVERTISE_1000XPAUSE)
681 new_local_adv |= ADVERTISE_PAUSE_CAP;
682 if (local_adv & ADVERTISE_1000XPSE_ASYM)
683 new_local_adv |= ADVERTISE_PAUSE_ASYM;
684 if (remote_adv & ADVERTISE_1000XPAUSE)
685 new_remote_adv |= ADVERTISE_PAUSE_CAP;
686 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
687 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
688
689 local_adv = new_local_adv;
690 remote_adv = new_remote_adv;
691 }
692
693 /* See Table 28B-3 of 802.3ab-1999 spec. */
694 if (local_adv & ADVERTISE_PAUSE_CAP) {
695 if(local_adv & ADVERTISE_PAUSE_ASYM) {
696 if (remote_adv & ADVERTISE_PAUSE_CAP) {
697 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
698 }
699 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
700 bp->flow_ctrl = FLOW_CTRL_RX;
701 }
702 }
703 else {
704 if (remote_adv & ADVERTISE_PAUSE_CAP) {
705 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
706 }
707 }
708 }
709 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
710 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
711 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
712
713 bp->flow_ctrl = FLOW_CTRL_TX;
714 }
715 }
716}
717
27a005b8
MC
718static int
719bnx2_5709s_linkup(struct bnx2 *bp)
720{
721 u32 val, speed;
722
723 bp->link_up = 1;
724
725 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
726 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
727 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
728
729 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
730 bp->line_speed = bp->req_line_speed;
731 bp->duplex = bp->req_duplex;
732 return 0;
733 }
734 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
735 switch (speed) {
736 case MII_BNX2_GP_TOP_AN_SPEED_10:
737 bp->line_speed = SPEED_10;
738 break;
739 case MII_BNX2_GP_TOP_AN_SPEED_100:
740 bp->line_speed = SPEED_100;
741 break;
742 case MII_BNX2_GP_TOP_AN_SPEED_1G:
743 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
744 bp->line_speed = SPEED_1000;
745 break;
746 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
747 bp->line_speed = SPEED_2500;
748 break;
749 }
750 if (val & MII_BNX2_GP_TOP_AN_FD)
751 bp->duplex = DUPLEX_FULL;
752 else
753 bp->duplex = DUPLEX_HALF;
754 return 0;
755}
756
b6016b76 757static int
5b0c76ad
MC
758bnx2_5708s_linkup(struct bnx2 *bp)
759{
760 u32 val;
761
762 bp->link_up = 1;
763 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
764 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
765 case BCM5708S_1000X_STAT1_SPEED_10:
766 bp->line_speed = SPEED_10;
767 break;
768 case BCM5708S_1000X_STAT1_SPEED_100:
769 bp->line_speed = SPEED_100;
770 break;
771 case BCM5708S_1000X_STAT1_SPEED_1G:
772 bp->line_speed = SPEED_1000;
773 break;
774 case BCM5708S_1000X_STAT1_SPEED_2G5:
775 bp->line_speed = SPEED_2500;
776 break;
777 }
778 if (val & BCM5708S_1000X_STAT1_FD)
779 bp->duplex = DUPLEX_FULL;
780 else
781 bp->duplex = DUPLEX_HALF;
782
783 return 0;
784}
785
786static int
787bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
788{
789 u32 bmcr, local_adv, remote_adv, common;
790
791 bp->link_up = 1;
792 bp->line_speed = SPEED_1000;
793
ca58c3af 794 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
795 if (bmcr & BMCR_FULLDPLX) {
796 bp->duplex = DUPLEX_FULL;
797 }
798 else {
799 bp->duplex = DUPLEX_HALF;
800 }
801
802 if (!(bmcr & BMCR_ANENABLE)) {
803 return 0;
804 }
805
ca58c3af
MC
806 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
807 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
808
809 common = local_adv & remote_adv;
810 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
811
812 if (common & ADVERTISE_1000XFULL) {
813 bp->duplex = DUPLEX_FULL;
814 }
815 else {
816 bp->duplex = DUPLEX_HALF;
817 }
818 }
819
820 return 0;
821}
822
823static int
824bnx2_copper_linkup(struct bnx2 *bp)
825{
826 u32 bmcr;
827
ca58c3af 828 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
829 if (bmcr & BMCR_ANENABLE) {
830 u32 local_adv, remote_adv, common;
831
832 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
833 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
834
835 common = local_adv & (remote_adv >> 2);
836 if (common & ADVERTISE_1000FULL) {
837 bp->line_speed = SPEED_1000;
838 bp->duplex = DUPLEX_FULL;
839 }
840 else if (common & ADVERTISE_1000HALF) {
841 bp->line_speed = SPEED_1000;
842 bp->duplex = DUPLEX_HALF;
843 }
844 else {
ca58c3af
MC
845 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
846 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
847
848 common = local_adv & remote_adv;
849 if (common & ADVERTISE_100FULL) {
850 bp->line_speed = SPEED_100;
851 bp->duplex = DUPLEX_FULL;
852 }
853 else if (common & ADVERTISE_100HALF) {
854 bp->line_speed = SPEED_100;
855 bp->duplex = DUPLEX_HALF;
856 }
857 else if (common & ADVERTISE_10FULL) {
858 bp->line_speed = SPEED_10;
859 bp->duplex = DUPLEX_FULL;
860 }
861 else if (common & ADVERTISE_10HALF) {
862 bp->line_speed = SPEED_10;
863 bp->duplex = DUPLEX_HALF;
864 }
865 else {
866 bp->line_speed = 0;
867 bp->link_up = 0;
868 }
869 }
870 }
871 else {
872 if (bmcr & BMCR_SPEED100) {
873 bp->line_speed = SPEED_100;
874 }
875 else {
876 bp->line_speed = SPEED_10;
877 }
878 if (bmcr & BMCR_FULLDPLX) {
879 bp->duplex = DUPLEX_FULL;
880 }
881 else {
882 bp->duplex = DUPLEX_HALF;
883 }
884 }
885
886 return 0;
887}
888
889static int
890bnx2_set_mac_link(struct bnx2 *bp)
891{
892 u32 val;
893
894 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
895 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
896 (bp->duplex == DUPLEX_HALF)) {
897 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
898 }
899
900 /* Configure the EMAC mode register. */
901 val = REG_RD(bp, BNX2_EMAC_MODE);
902
903 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 904 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 905 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
906
907 if (bp->link_up) {
5b0c76ad
MC
908 switch (bp->line_speed) {
909 case SPEED_10:
59b47d8a
MC
910 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
911 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
912 break;
913 }
914 /* fall through */
915 case SPEED_100:
916 val |= BNX2_EMAC_MODE_PORT_MII;
917 break;
918 case SPEED_2500:
59b47d8a 919 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
920 /* fall through */
921 case SPEED_1000:
922 val |= BNX2_EMAC_MODE_PORT_GMII;
923 break;
924 }
b6016b76
MC
925 }
926 else {
927 val |= BNX2_EMAC_MODE_PORT_GMII;
928 }
929
930 /* Set the MAC to operate in the appropriate duplex mode. */
931 if (bp->duplex == DUPLEX_HALF)
932 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
933 REG_WR(bp, BNX2_EMAC_MODE, val);
934
935 /* Enable/disable rx PAUSE. */
936 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
937
938 if (bp->flow_ctrl & FLOW_CTRL_RX)
939 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
940 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
941
942 /* Enable/disable tx PAUSE. */
943 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
944 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
945
946 if (bp->flow_ctrl & FLOW_CTRL_TX)
947 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
948 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
949
950 /* Acknowledge the interrupt. */
951 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
952
953 return 0;
954}
955
27a005b8
MC
956static void
957bnx2_enable_bmsr1(struct bnx2 *bp)
958{
959 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
960 (CHIP_NUM(bp) == CHIP_NUM_5709))
961 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
962 MII_BNX2_BLK_ADDR_GP_STATUS);
963}
964
965static void
966bnx2_disable_bmsr1(struct bnx2 *bp)
967{
968 if ((bp->phy_flags & PHY_SERDES_FLAG) &&
969 (CHIP_NUM(bp) == CHIP_NUM_5709))
970 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
971 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
972}
973
605a9e20
MC
974static int
975bnx2_test_and_enable_2g5(struct bnx2 *bp)
976{
977 u32 up1;
978 int ret = 1;
979
980 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
981 return 0;
982
983 if (bp->autoneg & AUTONEG_SPEED)
984 bp->advertising |= ADVERTISED_2500baseX_Full;
985
27a005b8
MC
986 if (CHIP_NUM(bp) == CHIP_NUM_5709)
987 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
988
605a9e20
MC
989 bnx2_read_phy(bp, bp->mii_up1, &up1);
990 if (!(up1 & BCM5708S_UP1_2G5)) {
991 up1 |= BCM5708S_UP1_2G5;
992 bnx2_write_phy(bp, bp->mii_up1, up1);
993 ret = 0;
994 }
995
27a005b8
MC
996 if (CHIP_NUM(bp) == CHIP_NUM_5709)
997 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
998 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
999
605a9e20
MC
1000 return ret;
1001}
1002
1003static int
1004bnx2_test_and_disable_2g5(struct bnx2 *bp)
1005{
1006 u32 up1;
1007 int ret = 0;
1008
1009 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1010 return 0;
1011
27a005b8
MC
1012 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1013 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1014
605a9e20
MC
1015 bnx2_read_phy(bp, bp->mii_up1, &up1);
1016 if (up1 & BCM5708S_UP1_2G5) {
1017 up1 &= ~BCM5708S_UP1_2G5;
1018 bnx2_write_phy(bp, bp->mii_up1, up1);
1019 ret = 1;
1020 }
1021
27a005b8
MC
1022 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1023 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1024 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1025
605a9e20
MC
1026 return ret;
1027}
1028
1029static void
1030bnx2_enable_forced_2g5(struct bnx2 *bp)
1031{
1032 u32 bmcr;
1033
1034 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1035 return;
1036
27a005b8
MC
1037 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1038 u32 val;
1039
1040 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1041 MII_BNX2_BLK_ADDR_SERDES_DIG);
1042 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1043 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1044 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1045 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1046
1047 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1048 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1049 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1050
1051 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1052 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1053 bmcr |= BCM5708S_BMCR_FORCE_2500;
1054 }
1055
1056 if (bp->autoneg & AUTONEG_SPEED) {
1057 bmcr &= ~BMCR_ANENABLE;
1058 if (bp->req_duplex == DUPLEX_FULL)
1059 bmcr |= BMCR_FULLDPLX;
1060 }
1061 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1062}
1063
1064static void
1065bnx2_disable_forced_2g5(struct bnx2 *bp)
1066{
1067 u32 bmcr;
1068
1069 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
1070 return;
1071
27a005b8
MC
1072 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1073 u32 val;
1074
1075 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1076 MII_BNX2_BLK_ADDR_SERDES_DIG);
1077 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1078 val &= ~MII_BNX2_SD_MISC1_FORCE;
1079 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1080
1081 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1082 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1083 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1084
1085 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1086 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1087 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1088 }
1089
1090 if (bp->autoneg & AUTONEG_SPEED)
1091 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1092 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1093}
1094
b6016b76
MC
/* Re-evaluate the current link state from the PHY and reprogram the
 * MAC to match.  Reports link changes via bnx2_report_link().
 * Caller holds bp->phy_lock.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is unconditionally up. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* With a firmware-managed (remote) PHY, link state comes from
	 * firmware events instead.
	 */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down; read it twice to get the live state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	if ((bp->phy_flags & PHY_SERDES_FLAG) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val;

		/* 5706 SerDes: the EMAC status is authoritative; override
		 * the BMSR link bit with it.
		 */
		val = REG_RD(bp, BNX2_EMAC_STATUS);
		if (val & BNX2_EMAC_STATUS_LINK)
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Chip-specific link-up handling resolves speed/duplex. */
		if (bp->phy_flags & PHY_SERDES_FLAG) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop forced 2.5G so autoneg can renegotiate
		 * when the link comes back.
		 */
		if ((bp->phy_flags & PHY_SERDES_FLAG) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1160
1161static int
1162bnx2_reset_phy(struct bnx2 *bp)
1163{
1164 int i;
1165 u32 reg;
1166
ca58c3af 1167 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1168
1169#define PHY_RESET_MAX_WAIT 100
1170 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1171 udelay(10);
1172
ca58c3af 1173 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1174 if (!(reg & BMCR_RESET)) {
1175 udelay(20);
1176 break;
1177 }
1178 }
1179 if (i == PHY_RESET_MAX_WAIT) {
1180 return -EBUSY;
1181 }
1182 return 0;
1183}
1184
1185static u32
1186bnx2_phy_get_pause_adv(struct bnx2 *bp)
1187{
1188 u32 adv = 0;
1189
1190 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1191 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1192
1193 if (bp->phy_flags & PHY_SERDES_FLAG) {
1194 adv = ADVERTISE_1000XPAUSE;
1195 }
1196 else {
1197 adv = ADVERTISE_PAUSE_CAP;
1198 }
1199 }
1200 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1201 if (bp->phy_flags & PHY_SERDES_FLAG) {
1202 adv = ADVERTISE_1000XPSE_ASYM;
1203 }
1204 else {
1205 adv = ADVERTISE_PAUSE_ASYM;
1206 }
1207 }
1208 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1209 if (bp->phy_flags & PHY_SERDES_FLAG) {
1210 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1211 }
1212 else {
1213 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1214 }
1215 }
1216 return adv;
1217}
1218
0d8a6571
MC
1219static int bnx2_fw_sync(struct bnx2 *, u32, int);
1220
b6016b76 1221static int
0d8a6571
MC
1222bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
1223{
1224 u32 speed_arg = 0, pause_adv;
1225
1226 pause_adv = bnx2_phy_get_pause_adv(bp);
1227
1228 if (bp->autoneg & AUTONEG_SPEED) {
1229 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1230 if (bp->advertising & ADVERTISED_10baseT_Half)
1231 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1232 if (bp->advertising & ADVERTISED_10baseT_Full)
1233 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1234 if (bp->advertising & ADVERTISED_100baseT_Half)
1235 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1236 if (bp->advertising & ADVERTISED_100baseT_Full)
1237 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1238 if (bp->advertising & ADVERTISED_1000baseT_Full)
1239 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1240 if (bp->advertising & ADVERTISED_2500baseX_Full)
1241 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1242 } else {
1243 if (bp->req_line_speed == SPEED_2500)
1244 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1245 else if (bp->req_line_speed == SPEED_1000)
1246 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1247 else if (bp->req_line_speed == SPEED_100) {
1248 if (bp->req_duplex == DUPLEX_FULL)
1249 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1250 else
1251 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1252 } else if (bp->req_line_speed == SPEED_10) {
1253 if (bp->req_duplex == DUPLEX_FULL)
1254 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1255 else
1256 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1257 }
1258 }
1259
1260 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1261 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
1262 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_1000XPSE_ASYM))
1263 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1264
1265 if (port == PORT_TP)
1266 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1267 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1268
1269 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB_ARG0, speed_arg);
1270
1271 spin_unlock_bh(&bp->phy_lock);
1272 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 0);
1273 spin_lock_bh(&bp->phy_lock);
1274
1275 return 0;
1276}
1277
/* Configure a SerDes PHY for the requested autoneg or forced
 * speed/duplex.  Delegates to the firmware when a remote PHY is in
 * use.  Caller holds bp->phy_lock (dropped briefly while forcing the
 * link down).  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced speed path. */
		u32 new_bmcr;
		int force_link_down = 0;

		/* Reconcile the 2.5G capability with the requested speed;
		 * a change here requires bouncing the link.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				/* Clear the forced-speed MSB for 1G. */
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autoneg path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1392
1393#define ETHTOOL_ALL_FIBRE_SPEED \
deaf391b
MC
1394 (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ? \
1395 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1396 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1397
1398#define ETHTOOL_ALL_COPPER_SPEED \
1399 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1400 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1401 ADVERTISED_1000baseT_Full)
1402
1403#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1404 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1405
b6016b76
MC
1406#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1407
0d8a6571
MC
1408static void
1409bnx2_set_default_remote_link(struct bnx2 *bp)
1410{
1411 u32 link;
1412
1413 if (bp->phy_port == PORT_TP)
1414 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_COPPER_LINK);
1415 else
1416 link = REG_RD_IND(bp, bp->shmem_base + BNX2_RPHY_SERDES_LINK);
1417
1418 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1419 bp->req_line_speed = 0;
1420 bp->autoneg |= AUTONEG_SPEED;
1421 bp->advertising = ADVERTISED_Autoneg;
1422 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1423 bp->advertising |= ADVERTISED_10baseT_Half;
1424 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1425 bp->advertising |= ADVERTISED_10baseT_Full;
1426 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1427 bp->advertising |= ADVERTISED_100baseT_Half;
1428 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1429 bp->advertising |= ADVERTISED_100baseT_Full;
1430 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1431 bp->advertising |= ADVERTISED_1000baseT_Full;
1432 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1433 bp->advertising |= ADVERTISED_2500baseX_Full;
1434 } else {
1435 bp->autoneg = 0;
1436 bp->advertising = 0;
1437 bp->req_duplex = DUPLEX_FULL;
1438 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1439 bp->req_line_speed = SPEED_10;
1440 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1441 bp->req_duplex = DUPLEX_HALF;
1442 }
1443 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1444 bp->req_line_speed = SPEED_100;
1445 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1446 bp->req_duplex = DUPLEX_HALF;
1447 }
1448 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1449 bp->req_line_speed = SPEED_1000;
1450 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1451 bp->req_line_speed = SPEED_2500;
1452 }
1453}
1454
deaf391b
MC
1455static void
1456bnx2_set_default_link(struct bnx2 *bp)
1457{
0d8a6571
MC
1458 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
1459 return bnx2_set_default_remote_link(bp);
1460
deaf391b
MC
1461 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1462 bp->req_line_speed = 0;
1463 if (bp->phy_flags & PHY_SERDES_FLAG) {
1464 u32 reg;
1465
1466 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1467
1468 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG);
1469 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1470 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1471 bp->autoneg = 0;
1472 bp->req_line_speed = bp->line_speed = SPEED_1000;
1473 bp->req_duplex = DUPLEX_FULL;
1474 }
1475 } else
1476 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1477}
1478
0d8a6571
MC
/* Handle a link event from the firmware-managed (remote) PHY: decode
 * the shared-memory link status word into link/speed/duplex/flow
 * control and reprogram the MAC.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* The half-duplex cases deliberately fall through to the
		 * matching full-duplex case to pick up the line speed.
		 */
		switch (speed) {
		case BNX2_LINK_STATUS_10HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_10FULL:
			bp->line_speed = SPEED_10;
			break;
		case BNX2_LINK_STATUS_100HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_100BASE_T4:
		case BNX2_LINK_STATUS_100FULL:
			bp->line_speed = SPEED_100;
			break;
		case BNX2_LINK_STATUS_1000HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_1000FULL:
			bp->line_speed = SPEED_1000;
			break;
		case BNX2_LINK_STATUS_2500HALF:
			bp->duplex = DUPLEX_HALF;
			/* fallthrough */
		case BNX2_LINK_STATUS_2500FULL:
			bp->line_speed = SPEED_2500;
			break;
		default:
			bp->line_speed = 0;
			break;
		}

		spin_lock(&bp->phy_lock);
		bp->flow_ctrl = 0;
		/* Forced flow control unless both speed and flow-ctrl
		 * autoneg are enabled; otherwise take the negotiated
		 * result reported by firmware.
		 */
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Firmware can switch the active media; refresh defaults
		 * when the port type changes.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

		spin_unlock(&bp->phy_lock);
	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
1552
1553static int
1554bnx2_set_remote_link(struct bnx2 *bp)
1555{
1556 u32 evt_code;
1557
1558 evt_code = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_EVT_CODE_MB);
1559 switch (evt_code) {
1560 case BNX2_FW_EVT_CODE_LINK_EVENT:
1561 bnx2_remote_phy_event(bp);
1562 break;
1563 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1564 default:
1565 break;
1566 }
1567 return 0;
1568}
1569
b6016b76
MC
/* Configure a copper PHY for the requested autoneg or forced
 * speed/duplex.  Caller holds bp->phy_lock (dropped briefly while
 * forcing the link down).  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Current advertisement, masked to the bits we manage. */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only rewrite and restart autoneg when the advertisement
		 * changed or autoneg is currently disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for live state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
1666
1667static int
0d8a6571 1668bnx2_setup_phy(struct bnx2 *bp, u8 port)
b6016b76
MC
1669{
1670 if (bp->loopback == MAC_LOOPBACK)
1671 return 0;
1672
1673 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 1674 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1675 }
1676 else {
1677 return (bnx2_setup_copper_phy(bp));
1678 }
1679}
1680
27a005b8
MC
/* One-time initialization of the 5709 SerDes PHY.  The 5709 places
 * the IEEE registers at an offset and uses a block-address register
 * (MII_BNX2_BLK_ADDR) to select register banks.  Always returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp)
{
	u32 val;

	/* IEEE register set lives at offset 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Map the AN MMD before touching the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	bnx2_reset_phy(bp);

	/* Select fiber mode, disable media auto-detect. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the PHY is 2.5G capable. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable Broadcom autoneg message pages (BAM) / teton 2. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the combo IEEE block selected. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
1729
/* One-time initialization of the 5708 SerDes PHY: fiber mode with
 * auto-detect, PLL early-lock detect, optional 2.5G advertisement,
 * and board-specific TX amplitude tweaks.  Always returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G capability if supported. */
	if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Early 5708 steppings need a stronger TX signal. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply the NVRAM-configured TX control value on backplane
	 * designs.
	 */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_CONFIG) &
		BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = REG_RD_IND(bp, bp->shmem_base +
					  BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
1787
/* One-time initialization of the 5706 SerDes PHY, including
 * MTU-dependent extended packet length configuration.  The 0x18/0x1c
 * accesses are shadow/expansion PHY registers.  Always returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp)
{
	bnx2_reset_phy(bp);

	bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
1824
/* One-time initialization of a copper PHY: optional CRC workaround,
 * optional early-DAC disable, MTU-dependent extended packet length,
 * and ethernet@wirespeed.  The numbered registers (0x10, 0x15, 0x17,
 * 0x18, 0x1c) are Broadcom shadow/expansion registers.  Always
 * returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp)
{
	u32 val;

	bnx2_reset_phy(bp);

	/* Workaround sequence for PHYs that need the CRC fix. */
	if (bp->phy_flags & PHY_CRC_FIX_FLAG) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Disable early DAC on PHYs flagged for it. */
	if (bp->phy_flags & PHY_DIS_EARLY_DAC_FLAG) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
1875
1876
/* Initialize the PHY: set up MII register offsets, read the PHY id,
 * run the chip-specific init routine, then apply the current link
 * settings via bnx2_setup_phy().  Remote PHYs skip local init.
 * Returns 0 or a negative errno from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~PHY_INT_MODE_MASK_FLAG;
	bp->phy_flags |= PHY_INT_MODE_LINK_READY_FLAG;

	/* Default (copper) MII register map; 5709 SerDes overrides
	 * these in bnx2_init_5709s_phy().
	 */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Firmware owns the PHY; skip local initialization. */
	if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & PHY_SERDES_FLAG) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp);
	}
	else {
		rc = bnx2_init_copper_phy(bp);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
1920
1921static int
1922bnx2_set_mac_loopback(struct bnx2 *bp)
1923{
1924 u32 mac_mode;
1925
1926 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
1927 mac_mode &= ~BNX2_EMAC_MODE_PORT;
1928 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
1929 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
1930 bp->link_up = 1;
1931 return 0;
1932}
1933
bc5a0690
MC
1934static int bnx2_test_link(struct bnx2 *);
1935
/* Put the PHY into loopback at 1G full duplex, wait up to ~1s for the
 * link test to pass, then configure the EMAC for GMII with loopback
 * and force bits cleared.  Returns 0, or the error from the PHY
 * write.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Poll for the loopback link; proceed regardless after 10 tries. */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
1965
/* Send a message to the bootcode firmware through the driver mailbox
 * and wait for the matching sequence number to be acknowledged.
 * @msg_data: message code/data (sequence number is or'ed in here).
 * @silent:   suppress the timeout printk when non-zero.
 * Returns 0 on success (or when no wait was requested), -EBUSY on
 * ack timeout, -EIO when firmware reports a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

	/* wait for an acknowledgement. */
	for (i = 0; i < (FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* Messages marked WAIT0 do not require completion checking. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2008
59b47d8a
MC
/* Initialize the 5709 context memory, which lives in host pages:
 * start the context memory init, wait for it to complete, then
 * program each host page into the context page table.  Returns 0 or
 * -EBUSY on hardware timeout.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for MEM_INIT to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	/* Install each context page and wait for the write to latch. */
	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2051
b6016b76
MC
/* Zero all 96 on-chip connection contexts (5706/5708 family; the
 * 5709 keeps contexts in host memory, see bnx2_init_5709_context).
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 remaps certain VCIDs to different
			 * physical CIDs.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* Each context spans several physical context pages. */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, 0x00);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				CTX_WR(bp, 0x00, offset, 0);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
		}
	}
}
2097
/* Work around bad RX mbuf memory blocks: allocate every free mbuf
 * cluster from the hardware pool, remember the good ones (bit 9 of
 * the returned address clear), and free only those back - the bad
 * blocks are deliberately never returned so the hardware cannot use
 * them.  Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	/* 512 entries is enough for the whole pool. */
	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		REG_WR_IND(bp, BNX2_RBUF_COMMAND, BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = REG_RD_IND(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = REG_RD_IND(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		REG_WR_IND(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2148
2149static void
6aa20a22 2150bnx2_set_mac_addr(struct bnx2 *bp)
b6016b76
MC
2151{
2152 u32 val;
2153 u8 *mac_addr = bp->dev->dev_addr;
2154
2155 val = (mac_addr[0] << 8) | mac_addr[1];
2156
2157 REG_WR(bp, BNX2_EMAC_MAC_MATCH0, val);
2158
6aa20a22 2159 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2160 (mac_addr[4] << 8) | mac_addr[5];
2161
2162 REG_WR(bp, BNX2_EMAC_MAC_MATCH1, val);
2163}
2164
/* Allocate and DMA-map an skb for RX ring slot @index and point the
 * corresponding rx_bd at it.  The caller is responsible for updating
 * the producer index.  Returns 0 or -ENOMEM.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &bp->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &bp->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to BNX2_RX_ALIGN for the hardware. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Split the 64-bit DMA address across the descriptor words. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2195
da3e4fbe
MC
2196static int
2197bnx2_phy_event_is_set(struct bnx2 *bp, u32 event)
b6016b76 2198{
da3e4fbe 2199 struct status_block *sblk = bp->status_blk;
b6016b76 2200 u32 new_link_state, old_link_state;
da3e4fbe 2201 int is_set = 1;
b6016b76 2202
da3e4fbe
MC
2203 new_link_state = sblk->status_attn_bits & event;
2204 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2205 if (new_link_state != old_link_state) {
da3e4fbe
MC
2206 if (new_link_state)
2207 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2208 else
2209 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2210 } else
2211 is_set = 0;
2212
2213 return is_set;
2214}
2215
/* Handle PHY-related attention events from the status block:
 * a link-state change (re-resolve the link under the phy lock) and
 * a timer-abort event (used by remote-PHY handling).
 */
static void
bnx2_phy_int(struct bnx2 *bp)
{
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_LINK_STATE)) {
		spin_lock(&bp->phy_lock);
		bnx2_set_link(bp);
		spin_unlock(&bp->phy_lock);
	}
	if (bnx2_phy_event_is_set(bp, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

}
2228
/* Reclaim completed TX buffer descriptors.  Walks the software consumer
 * index up to the hardware consumer index from the status block,
 * unmapping and freeing each completed skb, then wakes the queue if it
 * was stopped and enough descriptors are now free.
 */
static void
bnx2_tx_int(struct bnx2 *bp)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_free_bd = 0;

	hw_cons = bp->hw_tx_cons = sblk->status_tx_quick_consumer_index0;
	/* The last entry of a ring page is a link BD, never a completion;
	 * skip over it.
	 */
	if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &bp->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* partial BD completions possible with TSO packets */
		if (skb_is_gso(skb)) {
			u16 last_idx, last_ring_idx;

			last_idx = sw_cons +
				skb_shinfo(skb)->nr_frags + 1;
			last_ring_idx = sw_ring_cons +
				skb_shinfo(skb)->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Signed 16-bit compare handles index wrap-around;
			 * stop if the last BD of this packet has not
			 * completed yet.
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = skb_shinfo(skb)->nr_frags;

		/* Unmap the fragment BDs that follow the header BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				pci_unmap_addr(
					&bp->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		tx_free_bd += last + 1;

		dev_kfree_skb(skb);

		/* Re-read the hardware index: more completions may have
		 * arrived while we were reclaiming.
		 */
		hw_cons = bp->hw_tx_cons =
			sblk->status_tx_quick_consumer_index0;

		if ((hw_cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT) {
			hw_cons++;
		}
	}

	bp->tx_cons = sw_cons;
	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* Double-checked wake under netif_tx_lock to close the race with
	 * bnx2_start_xmit() stopping the queue.
	 */
	if (unlikely(netif_queue_stopped(bp->dev)) &&
	    (bnx2_tx_avail(bp) > bp->tx_wake_thresh)) {
		netif_tx_lock(bp->dev);
		if ((netif_queue_stopped(bp->dev)) &&
		    (bnx2_tx_avail(bp) > bp->tx_wake_thresh))
			netif_wake_queue(bp->dev);
		netif_tx_unlock(bp->dev);
	}
}
2316
/* Recycle an RX buffer: re-post the skb that sits at "cons" into ring
 * slot "prod" without re-allocating or re-mapping it.  Used when the
 * packet was copied out or must be dropped.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct sk_buff *skb,
	u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &bp->rx_buf_ring[cons];
	prod_rx_buf = &bp->rx_buf_ring[prod];

	/* Give the (header-sized) region we synced for the CPU back to
	 * the device before the chip reuses the buffer.
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	bp->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: mapping and BD address are already correct. */
	if (cons == prod)
		return;

	/* Move the DMA mapping and BD bus address from cons to prod. */
	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &bp->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &bp->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2346
/* Service up to "budget" RX completions.  For each completed buffer:
 * validate the frame header, either copy small packets into a fresh skb
 * (recycling the big buffer) or pass the buffer up and post a new one,
 * apply hardware checksum/VLAN results, and hand the skb to the stack.
 * Returns the number of packets processed.
 */
static int
bnx2_rx_int(struct bnx2 *bp, int budget)
{
	struct status_block *sblk = bp->status_blk;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0;

	hw_cons = bp->hw_rx_cons = sblk->status_rx_quick_consumer_index0;
	/* The last entry of a ring page is a link BD; skip over it. */
	if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT) {
		hw_cons++;
	}
	sw_cons = bp->rx_cons;
	sw_prod = bp->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &bp->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Sync only the frame header region for the CPU; the full
		 * buffer is unmapped only if the skb goes up the stack.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			bp->rx_offset + RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr to the frame data. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len - 4;	/* drop 4-byte CRC */

		if ((status = rx_hdr->l2_fhdr_status) &
			(L2_FHDR_ERRORS_BAD_CRC |
			L2_FHDR_ERRORS_PHY_DECODE |
			L2_FHDR_ERRORS_ALIGNMENT |
			L2_FHDR_ERRORS_TOO_SHORT |
			L2_FHDR_ERRORS_GIANT_FRAME)) {

			goto reuse_rx;
		}

		/* Since we don't have a jumbo ring, copy small packets
		 * if mtu > 1500
		 */
		if ((bp->dev->mtu > 1500) && (len <= RX_COPY_THRESH)) {
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 2);
			if (new_skb == NULL)
				goto reuse_rx;

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb, bp->rx_offset - 2,
				      new_skb->data, len + 2);
			skb_reserve(new_skb, 2);
			skb_put(new_skb, len);

			/* Recycle the original buffer back into the ring. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		}
		else if (bnx2_alloc_rx_skb(bp, sw_ring_prod) == 0) {
			/* A replacement buffer was posted; pass this one up. */
			pci_unmap_single(bp->pdev, dma_addr,
				bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);

			skb_reserve(skb, bp->rx_offset);
			skb_put(skb, len);
		}
		else {
reuse_rx:
			/* Error frame or allocation failure: recycle and drop. */
			bnx2_reuse_rx_skb(bp, skb,
				sw_ring_cons, sw_ring_prod);
			goto next_rx;
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN ethertype
		 * (0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

#ifdef BCM_VLAN
		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) && (bp->vlgrp != 0)) {
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				rx_hdr->l2_fhdr_vlan_tag);
		}
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;
		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bp->hw_rx_cons =
				sblk->status_rx_quick_consumer_index0;
			if ((hw_cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT)
				hw_cons++;
			rmb();
		}
	}
	bp->rx_cons = sw_cons;
	bp->rx_prod = sw_prod;

	/* Tell the chip about the new producer index and byte sequence. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, sw_prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
2494
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2517
8e6a72c4
MC
/* One-shot MSI ISR: unlike bnx2_msi(), no mask/ack register write is
 * done here before scheduling NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	prefetch(bp->status_blk);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2534
/* INTx ISR.  The line may be shared, so first check whether this
 * device actually raised the interrupt before claiming it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2 *bp = netdev_priv(dev);

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((bp->status_blk->status_idx == bp->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	netif_rx_schedule(dev);

	return IRQ_HANDLED;
}
2564
0d8a6571
MC
2565#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
2566 STATUS_ATTN_BITS_TIMER_ABORT)
da3e4fbe 2567
f4e418f7
MC
2568static inline int
2569bnx2_has_work(struct bnx2 *bp)
2570{
2571 struct status_block *sblk = bp->status_blk;
2572
2573 if ((sblk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) ||
2574 (sblk->status_tx_quick_consumer_index0 != bp->hw_tx_cons))
2575 return 1;
2576
da3e4fbe
MC
2577 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
2578 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
2579 return 1;
2580
2581 return 0;
2582}
2583
b6016b76
MC
/* NAPI poll handler.  Services attention events, TX and RX completions,
 * then either completes polling and re-enables interrupts (return 0) or
 * asks to be polled again (return 1).
 */
static int
bnx2_poll(struct net_device *dev, int *budget)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct status_block *sblk = bp->status_blk;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);
	}

	if (bp->status_blk->status_tx_quick_consumer_index0 != bp->hw_tx_cons)
		bnx2_tx_int(bp);

	if (bp->status_blk->status_rx_quick_consumer_index0 != bp->hw_rx_cons) {
		int orig_budget = *budget;
		int work_done;

		if (orig_budget > dev->quota)
			orig_budget = dev->quota;

		work_done = bnx2_rx_int(bp, orig_budget);
		*budget -= work_done;
		dev->quota -= work_done;
	}

	bp->last_status_idx = bp->status_blk->status_idx;
	rmb();

	if (!bnx2_has_work(bp)) {
		netif_rx_complete(dev);
		/* MSI: a single ack write re-enables the interrupt. */
		if (likely(bp->flags & USING_MSI_FLAG)) {
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bp->last_status_idx);
			return 0;
		}
		/* INTx: first ack with the interrupt still masked, then
		 * write again without the mask to re-enable it.
		 */
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bp->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bp->last_status_idx);
		return 0;
	}

	return 1;
}
2644
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the EMAC RX mode (promiscuous / VLAN-tag keep) and the RPM
 * sort-user and multicast hash registers from dev->flags and the
 * device's multicast list.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	int i;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Strip VLAN tags in hardware only when a vlan group is
	 * registered and ASF management firmware is not active.
	 */
	if (!bp->vlgrp && !(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (!(bp->flags & ASF_ENABLE_FLAG))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: fill the hash registers. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: CRC bits select one of 256 filter
		 * bits spread across NUM_MC_HASH_REGISTERS registers.
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the sort-user register. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
2719
fba9fe91
MC
2720#define FW_BUF_SIZE 0x8000
2721
2722static int
2723bnx2_gunzip_init(struct bnx2 *bp)
2724{
2725 if ((bp->gunzip_buf = vmalloc(FW_BUF_SIZE)) == NULL)
2726 goto gunzip_nomem1;
2727
2728 if ((bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL)) == NULL)
2729 goto gunzip_nomem2;
2730
2731 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(), GFP_KERNEL);
2732 if (bp->strm->workspace == NULL)
2733 goto gunzip_nomem3;
2734
2735 return 0;
2736
2737gunzip_nomem3:
2738 kfree(bp->strm);
2739 bp->strm = NULL;
2740
2741gunzip_nomem2:
2742 vfree(bp->gunzip_buf);
2743 bp->gunzip_buf = NULL;
2744
2745gunzip_nomem1:
2746 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for "
2747 "uncompression.\n", bp->dev->name);
2748 return -ENOMEM;
2749}
2750
2751static void
2752bnx2_gunzip_end(struct bnx2 *bp)
2753{
2754 kfree(bp->strm->workspace);
2755
2756 kfree(bp->strm);
2757 bp->strm = NULL;
2758
2759 if (bp->gunzip_buf) {
2760 vfree(bp->gunzip_buf);
2761 bp->gunzip_buf = NULL;
2762 }
2763}
2764
2765static int
2766bnx2_gunzip(struct bnx2 *bp, u8 *zbuf, int len, void **outbuf, int *outlen)
2767{
2768 int n, rc;
2769
2770 /* check gzip header */
2771 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
2772 return -EINVAL;
2773
2774 n = 10;
2775
2776#define FNAME 0x8
2777 if (zbuf[3] & FNAME)
2778 while ((zbuf[n++] != 0) && (n < len));
2779
2780 bp->strm->next_in = zbuf + n;
2781 bp->strm->avail_in = len - n;
2782 bp->strm->next_out = bp->gunzip_buf;
2783 bp->strm->avail_out = FW_BUF_SIZE;
2784
2785 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
2786 if (rc != Z_OK)
2787 return rc;
2788
2789 rc = zlib_inflate(bp->strm, Z_FINISH);
2790
2791 *outlen = FW_BUF_SIZE - bp->strm->avail_out;
2792 *outbuf = bp->gunzip_buf;
2793
2794 if ((rc != Z_OK) && (rc != Z_STREAM_END))
2795 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
2796 bp->dev->name, bp->strm->msg);
2797
2798 zlib_inflateEnd(bp->strm);
2799
2800 if (rc == Z_STREAM_END)
2801 return 0;
2802
2803 return rc;
2804}
2805
b6016b76
MC
/* Load one of the two RV2P processors with its firmware.  Instructions
 * are written two 32-bit words at a time (HIGH then LOW) and committed
 * to instruction-memory address i/8 via the ADDR_CMD register; the
 * processor is then held in reset (un-stalled later).
 */
static void
load_rv2p_fw(struct bnx2 *bp, u32 *rv2p_code, u32 rv2p_code_len,
	u32 rv2p_proc)
{
	int i;
	u32 val;


	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, cpu_to_le32(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, cpu_to_le32(*rv2p_code));
		rv2p_code++;

		if (rv2p_proc == RV2P_PROC1) {
			val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
		}
		else {
			val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
			REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}
}
2838
af3ee519 2839static int
b6016b76
MC
2840load_cpu_fw(struct bnx2 *bp, struct cpu_reg *cpu_reg, struct fw_info *fw)
2841{
2842 u32 offset;
2843 u32 val;
af3ee519 2844 int rc;
b6016b76
MC
2845
2846 /* Halt the CPU. */
2847 val = REG_RD_IND(bp, cpu_reg->mode);
2848 val |= cpu_reg->mode_value_halt;
2849 REG_WR_IND(bp, cpu_reg->mode, val);
2850 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2851
2852 /* Load the Text area. */
2853 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519
MC
2854 if (fw->gz_text) {
2855 u32 text_len;
2856 void *text;
2857
2858 rc = bnx2_gunzip(bp, fw->gz_text, fw->gz_text_len, &text,
2859 &text_len);
2860 if (rc)
2861 return rc;
2862
2863 fw->text = text;
2864 }
2865 if (fw->gz_text) {
b6016b76
MC
2866 int j;
2867
2868 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
fba9fe91 2869 REG_WR_IND(bp, offset, cpu_to_le32(fw->text[j]));
b6016b76
MC
2870 }
2871 }
2872
2873 /* Load the Data area. */
2874 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
2875 if (fw->data) {
2876 int j;
2877
2878 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2879 REG_WR_IND(bp, offset, fw->data[j]);
2880 }
2881 }
2882
2883 /* Load the SBSS area. */
2884 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
2885 if (fw->sbss) {
2886 int j;
2887
2888 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2889 REG_WR_IND(bp, offset, fw->sbss[j]);
2890 }
2891 }
2892
2893 /* Load the BSS area. */
2894 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
2895 if (fw->bss) {
2896 int j;
2897
2898 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2899 REG_WR_IND(bp, offset, fw->bss[j]);
2900 }
2901 }
2902
2903 /* Load the Read-Only area. */
2904 offset = cpu_reg->spad_base +
2905 (fw->rodata_addr - cpu_reg->mips_view_base);
2906 if (fw->rodata) {
2907 int j;
2908
2909 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2910 REG_WR_IND(bp, offset, fw->rodata[j]);
2911 }
2912 }
2913
2914 /* Clear the pre-fetch instruction. */
2915 REG_WR_IND(bp, cpu_reg->inst, 0);
2916 REG_WR_IND(bp, cpu_reg->pc, fw->start_addr);
2917
2918 /* Start the CPU. */
2919 val = REG_RD_IND(bp, cpu_reg->mode);
2920 val &= ~cpu_reg->mode_value_halt;
2921 REG_WR_IND(bp, cpu_reg->state, cpu_reg->state_value_clear);
2922 REG_WR_IND(bp, cpu_reg->mode, val);
af3ee519
MC
2923
2924 return 0;
b6016b76
MC
2925}
2926
/* Load firmware into all on-chip processors: the two RV2P engines, and
 * the RX, TX, TX-patchup, completion and (5709 only) command CPUs.
 * The 5709 uses the *_fw_09 images, all other chips *_fw_06.
 * Returns 0 on success or a decompression/load error code.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	struct cpu_reg cpu_reg;
	struct fw_info *fw;
	int rc = 0;
	void *text;
	u32 text_len;

	if ((rc = bnx2_gunzip_init(bp)) != 0)
		return rc;

	/* Initialize the RV2P processor. */
	rc = bnx2_gunzip(bp, bnx2_rv2p_proc1, sizeof(bnx2_rv2p_proc1), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC1);

	rc = bnx2_gunzip(bp, bnx2_rv2p_proc2, sizeof(bnx2_rv2p_proc2), &text,
			 &text_len);
	if (rc)
		goto init_cpu_err;

	load_rv2p_fw(bp, text, text_len, RV2P_PROC2);

	/* Initialize the RX Processor. */
	cpu_reg.mode = BNX2_RXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_RXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_RXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_RXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_RXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_RXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_RXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_RXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_RXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_RXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_rxp_fw_09;
	else
		fw = &bnx2_rxp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	cpu_reg.mode = BNX2_TXP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TXP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TXP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TXP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TXP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TXP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TXP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TXP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TXP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TXP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_txp_fw_09;
	else
		fw = &bnx2_txp_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	cpu_reg.mode = BNX2_TPAT_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_TPAT_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_TPAT_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_TPAT_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_TPAT_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_TPAT_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_TPAT_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_TPAT_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_TPAT_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_TPAT_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_tpat_fw_09;
	else
		fw = &bnx2_tpat_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	cpu_reg.mode = BNX2_COM_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_COM_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_COM_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_COM_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_COM_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_COM_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_COM_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_COM_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_COM_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_COM_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		fw = &bnx2_com_fw_09;
	else
		fw = &bnx2_com_fw_06;

	rc = load_cpu_fw(bp, &cpu_reg, fw);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor (5709 only). */
	cpu_reg.mode = BNX2_CP_CPU_MODE;
	cpu_reg.mode_value_halt = BNX2_CP_CPU_MODE_SOFT_HALT;
	cpu_reg.mode_value_sstep = BNX2_CP_CPU_MODE_STEP_ENA;
	cpu_reg.state = BNX2_CP_CPU_STATE;
	cpu_reg.state_value_clear = 0xffffff;
	cpu_reg.gpr0 = BNX2_CP_CPU_REG_FILE;
	cpu_reg.evmask = BNX2_CP_CPU_EVENT_MASK;
	cpu_reg.pc = BNX2_CP_CPU_PROGRAM_COUNTER;
	cpu_reg.inst = BNX2_CP_CPU_INSTRUCTION;
	cpu_reg.bp = BNX2_CP_CPU_HW_BREAKPOINT;
	cpu_reg.spad_base = BNX2_CP_SCRATCH;
	cpu_reg.mips_view_base = 0x8000000;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		fw = &bnx2_cp_fw_09;

		rc = load_cpu_fw(bp, &cpu_reg, fw);
		if (rc)
			goto init_cpu_err;
	}
init_cpu_err:
	bnx2_gunzip_end(bp);
	return rc;
}
3071
/* Transition the device between PCI power states.
 * PCI_D0: wake the chip, clear magic/ACPI packet modes.
 * PCI_D3hot: optionally arm Wake-on-LAN (force 10/100 copper autoneg,
 * set up multicast acceptance and the WOL sort mode, keep EMAC/RPM
 * enabled), notify the firmware, then write the D3hot state into the
 * PM control register.  Returns 0 on success, -EINVAL for any other
 * requested state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily force 10/100 autoneg for the WOL
			 * link, then restore the user settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			bp->autoneg = AUTONEG_SPEED;
			bp->advertising = ADVERTISED_10baseT_Half |
				ADVERTISED_10baseT_Full |
				ADVERTISED_100baseT_Half |
				ADVERTISED_100baseT_Full |
				ADVERTISED_Autoneg;

			bnx2_setup_copper_phy(bp);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_PORT_MII |
			       BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell management firmware we are suspending, unless
		 * WOL is not supported at all on this board.
		 */
		if (!(bp->flags & NO_WOL_FLAG))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* 5706 A0/A1 only enter D3hot when WOL is armed. */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3198
3199static int
3200bnx2_acquire_nvram_lock(struct bnx2 *bp)
3201{
3202 u32 val;
3203 int j;
3204
3205 /* Request access to the flash interface. */
3206 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3207 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3208 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3209 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3210 break;
3211
3212 udelay(5);
3213 }
3214
3215 if (j >= NVRAM_TIMEOUT_COUNT)
3216 return -EBUSY;
3217
3218 return 0;
3219}
3220
3221static int
3222bnx2_release_nvram_lock(struct bnx2 *bp)
3223{
3224 int j;
3225 u32 val;
3226
3227 /* Relinquish nvram interface. */
3228 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3229
3230 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3231 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3232 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3233 break;
3234
3235 udelay(5);
3236 }
3237
3238 if (j >= NVRAM_TIMEOUT_COUNT)
3239 return -EBUSY;
3240
3241 return 0;
3242}
3243
3244
3245static int
3246bnx2_enable_nvram_write(struct bnx2 *bp)
3247{
3248 u32 val;
3249
3250 val = REG_RD(bp, BNX2_MISC_CFG);
3251 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3252
3253 if (!bp->flash_info->buffered) {
3254 int j;
3255
3256 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3257 REG_WR(bp, BNX2_NVM_COMMAND,
3258 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3259
3260 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3261 udelay(5);
3262
3263 val = REG_RD(bp, BNX2_NVM_COMMAND);
3264 if (val & BNX2_NVM_COMMAND_DONE)
3265 break;
3266 }
3267
3268 if (j >= NVRAM_TIMEOUT_COUNT)
3269 return -EBUSY;
3270 }
3271 return 0;
3272}
3273
3274static void
3275bnx2_disable_nvram_write(struct bnx2 *bp)
3276{
3277 u32 val;
3278
3279 val = REG_RD(bp, BNX2_MISC_CFG);
3280 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3281}
3282
3283
3284static void
3285bnx2_enable_nvram_access(struct bnx2 *bp)
3286{
3287 u32 val;
3288
3289 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3290 /* Enable both bits, even on read. */
6aa20a22 3291 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3292 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3293}
3294
3295static void
3296bnx2_disable_nvram_access(struct bnx2 *bp)
3297{
3298 u32 val;
3299
3300 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3301 /* Disable both bits, even after read. */
6aa20a22 3302 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3303 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3304 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3305}
3306
3307static int
3308bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3309{
3310 u32 cmd;
3311 int j;
3312
3313 if (bp->flash_info->buffered)
3314 /* Buffered flash, no erase needed */
3315 return 0;
3316
3317 /* Build an erase command */
3318 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3319 BNX2_NVM_COMMAND_DOIT;
3320
3321 /* Need to clear DONE bit separately. */
3322 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3323
3324 /* Address of the NVRAM to read from. */
3325 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3326
3327 /* Issue an erase command. */
3328 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3329
3330 /* Wait for completion. */
3331 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3332 u32 val;
3333
3334 udelay(5);
3335
3336 val = REG_RD(bp, BNX2_NVM_COMMAND);
3337 if (val & BNX2_NVM_COMMAND_DONE)
3338 break;
3339 }
3340
3341 if (j >= NVRAM_TIMEOUT_COUNT)
3342 return -EBUSY;
3343
3344 return 0;
3345}
3346
/* Read one 32-bit word from the flash at @offset into @ret_val (byte
 * buffer, big-endian data converted to host order).  @cmd_flags carries
 * FIRST/LAST framing bits for multi-word transfers.  Caller must already
 * hold the NVRAM lock and have access enabled.
 * Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash.  Buffered parts address
	 * by (page << page_bits) + byte-within-page rather than linearly.
	 */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Data register holds big-endian flash contents;
			 * convert, then copy as bytes (ret_val may be
			 * unaligned). */
			val = REG_RD(bp, BNX2_NVM_READ);

			val = be32_to_cpu(val);
			memcpy(ret_val, &val, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3392
3393
/* Write one 32-bit word (from byte buffer @val, converted to the flash's
 * big-endian order) to the flash at @offset.  @cmd_flags carries
 * FIRST/LAST framing bits.  Caller must hold the NVRAM lock with access
 * and write-enable already set up.
 * Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd, val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash (page-based addressing). */
	if (bp->flash_info->buffered) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* memcpy handles a possibly unaligned source buffer. */
	memcpy(&val32, val, 4);
	val32 = cpu_to_be32(val32);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, val32);

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
3437
/* Identify the attached flash/EEPROM part by matching the NVM_CFG1
 * strapping bits against flash_table[], set bp->flash_info and
 * bp->flash_size, and (if the interface was not already reconfigured by
 * a prior boot) program the NVM config registers for the detected part.
 * Returns 0 on success, -ENODEV if no table entry matches, or an error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc;
	struct flash_spec *flash;

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = sizeof(flash_table) / sizeof(struct flash_spec);

	rc = 0;
	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* Bit 23 selects the backup strap encoding. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Both loops above fall through with j == entry_count when no
	 * table entry matched. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

	/* Prefer the size advertised in shared hw config; fall back to
	 * the table entry's total_size when the field is zero. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
3515
3516static int
3517bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
3518 int buf_size)
3519{
3520 int rc = 0;
3521 u32 cmd_flags, offset32, len32, extra;
3522
3523 if (buf_size == 0)
3524 return 0;
3525
3526 /* Request access to the flash interface. */
3527 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
3528 return rc;
3529
3530 /* Enable access to flash interface */
3531 bnx2_enable_nvram_access(bp);
3532
3533 len32 = buf_size;
3534 offset32 = offset;
3535 extra = 0;
3536
3537 cmd_flags = 0;
3538
3539 if (offset32 & 3) {
3540 u8 buf[4];
3541 u32 pre_len;
3542
3543 offset32 &= ~3;
3544 pre_len = 4 - (offset & 3);
3545
3546 if (pre_len >= len32) {
3547 pre_len = len32;
3548 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3549 BNX2_NVM_COMMAND_LAST;
3550 }
3551 else {
3552 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3553 }
3554
3555 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3556
3557 if (rc)
3558 return rc;
3559
3560 memcpy(ret_buf, buf + (offset & 3), pre_len);
3561
3562 offset32 += 4;
3563 ret_buf += pre_len;
3564 len32 -= pre_len;
3565 }
3566 if (len32 & 3) {
3567 extra = 4 - (len32 & 3);
3568 len32 = (len32 + 4) & ~3;
3569 }
3570
3571 if (len32 == 4) {
3572 u8 buf[4];
3573
3574 if (cmd_flags)
3575 cmd_flags = BNX2_NVM_COMMAND_LAST;
3576 else
3577 cmd_flags = BNX2_NVM_COMMAND_FIRST |
3578 BNX2_NVM_COMMAND_LAST;
3579
3580 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3581
3582 memcpy(ret_buf, buf, 4 - extra);
3583 }
3584 else if (len32 > 0) {
3585 u8 buf[4];
3586
3587 /* Read the first word. */
3588 if (cmd_flags)
3589 cmd_flags = 0;
3590 else
3591 cmd_flags = BNX2_NVM_COMMAND_FIRST;
3592
3593 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
3594
3595 /* Advance to the next dword. */
3596 offset32 += 4;
3597 ret_buf += 4;
3598 len32 -= 4;
3599
3600 while (len32 > 4 && rc == 0) {
3601 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
3602
3603 /* Advance to the next dword. */
3604 offset32 += 4;
3605 ret_buf += 4;
3606 len32 -= 4;
3607 }
3608
3609 if (rc)
3610 return rc;
3611
3612 cmd_flags = BNX2_NVM_COMMAND_LAST;
3613 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
3614
3615 memcpy(ret_buf, buf, 4 - extra);
3616 }
3617
3618 /* Disable access to flash interface */
3619 bnx2_disable_nvram_access(bp);
3620
3621 bnx2_release_nvram_lock(bp);
3622
3623 return rc;
3624}
3625
/* Write @buf_size bytes from @data_buf to the flash at @offset.
 *
 * Unaligned starts/ends are handled by reading the surrounding dwords
 * first and merging the new data into a kmalloc'd aligned copy.  The
 * write then proceeds page by page: for non-buffered flash each page is
 * read into flash_buffer, erased, and rewritten with the untouched
 * head/tail words restored around the new data.  The NVRAM lock is
 * acquired and released around each page so other users are not starved.
 * Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Capture the dword containing the unaligned start. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Capture the dword containing the unaligned end. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build an aligned image: preserved head word, new data,
		 * preserved tail word. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (bp->flash_info->buffered == 0) {
		/* Scratch buffer for one flash page (264 bytes covers the
		 * supported non-buffered page sizes). */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (bp->flash_info->buffered == 0) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (bp->flash_info->buffered == 0) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->buffered) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (bp->flash_info->buffered == 0) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both paths are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
3805
0d8a6571
MC
/* Detect whether the firmware manages the (SerDes) PHY remotely, via the
 * capability mailbox in shared memory.  If so, acknowledge the
 * capability (only while the interface is running), set
 * REMOTE_PHY_CAP_FLAG, and record the current port type from the
 * firmware link status.  Called under bp->phy_lock by bnx2_reset_chip().
 */
static void
bnx2_init_remote_phy(struct bnx2 *bp)
{
	u32 val;

	bp->phy_flags &= ~REMOTE_PHY_CAP_FLAG;
	/* Remote PHY only applies to SerDes devices. */
	if (!(bp->phy_flags & PHY_SERDES_FLAG))
		return;

	/* A valid signature indicates the firmware publishes capabilities
	 * in this mailbox. */
	val = REG_RD_IND(bp, bp->shmem_base + BNX2_FW_CAP_MB);
	if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
		return;

	if (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE) {
		if (netif_running(bp->dev)) {
			/* Tell the firmware the driver accepts remote
			 * PHY control. */
			val = BNX2_DRV_ACK_CAP_SIGNATURE |
			      BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
			REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_ACK_CAP_MB,
				   val);
		}
		bp->phy_flags |= REMOTE_PHY_CAP_FLAG;

		val = REG_RD_IND(bp, bp->shmem_base + BNX2_LINK_STATUS);
		if (val & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;
	}
}
3835
b6016b76
MC
3836static int
3837bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
3838{
3839 u32 val;
3840 int i, rc = 0;
3841
3842 /* Wait for the current PCI transaction to complete before
3843 * issuing a reset. */
3844 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
3845 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
3846 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
3847 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
3848 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
3849 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
3850 udelay(5);
3851
b090ae2b
MC
3852 /* Wait for the firmware to tell us it is ok to issue a reset. */
3853 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1);
3854
b6016b76
MC
3855 /* Deposit a driver reset signature so the firmware knows that
3856 * this is a soft reset. */
e3648b3d 3857 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_RESET_SIGNATURE,
b6016b76
MC
3858 BNX2_DRV_RESET_SIGNATURE_MAGIC);
3859
b6016b76
MC
3860 /* Do a dummy read to force the chip to complete all current transaction
3861 * before we issue a reset. */
3862 val = REG_RD(bp, BNX2_MISC_ID);
3863
234754d5
MC
3864 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3865 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
3866 REG_RD(bp, BNX2_MISC_COMMAND);
3867 udelay(5);
b6016b76 3868
234754d5
MC
3869 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3870 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 3871
234754d5 3872 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 3873
234754d5
MC
3874 } else {
3875 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3876 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
3877 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
3878
3879 /* Chip reset. */
3880 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
3881
3882 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3883 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3884 current->state = TASK_UNINTERRUPTIBLE;
3885 schedule_timeout(HZ / 50);
b6016b76 3886 }
b6016b76 3887
234754d5
MC
3888 /* Reset takes approximate 30 usec */
3889 for (i = 0; i < 10; i++) {
3890 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
3891 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3892 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
3893 break;
3894 udelay(10);
3895 }
3896
3897 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
3898 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
3899 printk(KERN_ERR PFX "Chip reset did not complete\n");
3900 return -EBUSY;
3901 }
b6016b76
MC
3902 }
3903
3904 /* Make sure byte swapping is properly configured. */
3905 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
3906 if (val != 0x01020304) {
3907 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
3908 return -ENODEV;
3909 }
3910
b6016b76 3911 /* Wait for the firmware to finish its initialization. */
b090ae2b
MC
3912 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 0);
3913 if (rc)
3914 return rc;
b6016b76 3915
0d8a6571
MC
3916 spin_lock_bh(&bp->phy_lock);
3917 bnx2_init_remote_phy(bp);
3918 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
3919 bnx2_set_default_remote_link(bp);
3920 spin_unlock_bh(&bp->phy_lock);
3921
b6016b76
MC
3922 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
3923 /* Adjust the voltage regular to two steps lower. The default
3924 * of this register is 0x0000000e. */
3925 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
3926
3927 /* Remove bad rbuf memory from the free pool. */
3928 rc = bnx2_alloc_bad_rbuf(bp);
3929 }
3930
3931 return rc;
3932}
3933
/* Bring the chip out of reset into an operational state: program DMA
 * and context engines, load the internal CPUs' firmware, configure the
 * MQ/RV2P/TBDR blocks, MAC address, MTU, and all host-coalescing
 * parameters, then hand WAIT2 to the firmware and enable the remaining
 * blocks.  Returns 0 on success or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val;
	int rc;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* DMA byte/word swapping and channel counts. */
	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	val |= (0x2 << 20) | (1 << 11);

	/* NOTE(review): bit 23 appears to be a 133 MHz PCI-X tuning bit --
	 * undocumented here; confirm against the chip manual. */
	if ((bp->flags & PCIX_FLAG) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & PCIX_FLAG))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & PCIX_FLAG) {
		u16 val16;

		/* Clear PCI-X relaxed ordering. */
		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Download firmware to the on-chip RISC processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	/* 5709 A0/A1 workaround: disable MQ halt. */
	if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
		val |= BNX2_MQ_CONFIG_HALT_DIS;

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff engine from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	val = bp->dev->mtu + ETH_HLEN + 4;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	bp->last_status_idx = 0;
	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Point the host coalescing block at the status and statistics
	 * blocks' DMA addresses. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Interrupt coalescing parameters from bp (low 16 bits: normal,
	 * high 16 bits: during-interrupt values). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (CHIP_NUM(bp) == CHIP_NUM_5708)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks & 0xffff00);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & ONE_SHOT_MSI_FLAG)
		val |= BNX2_HC_CONFIG_ONE_SHOT;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Remember whether management firmware (ASF) is active. */
	if (REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_FEATURE) &
	    BNX2_PORT_FEATURE_ASF_ENABLED)
		bp->flags |= ASF_ENABLE_FLAG;

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  0);

	/* Enable the remaining blocks; read back to flush the posted
	 * write before the settle delay. */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, 0x5ffffff);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4115
59b47d8a
MC
4116static void
4117bnx2_init_tx_context(struct bnx2 *bp, u32 cid)
4118{
4119 u32 val, offset0, offset1, offset2, offset3;
4120
4121 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4122 offset0 = BNX2_L2CTX_TYPE_XI;
4123 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4124 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4125 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4126 } else {
4127 offset0 = BNX2_L2CTX_TYPE;
4128 offset1 = BNX2_L2CTX_CMD_TYPE;
4129 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4130 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4131 }
4132 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
4133 CTX_WR(bp, GET_CID_ADDR(cid), offset0, val);
4134
4135 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
4136 CTX_WR(bp, GET_CID_ADDR(cid), offset1, val);
4137
4138 val = (u64) bp->tx_desc_mapping >> 32;
4139 CTX_WR(bp, GET_CID_ADDR(cid), offset2, val);
4140
4141 val = (u64) bp->tx_desc_mapping & 0xffffffff;
4142 CTX_WR(bp, GET_CID_ADDR(cid), offset3, val);
4143}
b6016b76
MC
4144
4145static void
4146bnx2_init_tx_ring(struct bnx2 *bp)
4147{
4148 struct tx_bd *txbd;
59b47d8a 4149 u32 cid;
b6016b76 4150
2f8af120
MC
4151 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4152
b6016b76 4153 txbd = &bp->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4154
b6016b76
MC
4155 txbd->tx_bd_haddr_hi = (u64) bp->tx_desc_mapping >> 32;
4156 txbd->tx_bd_haddr_lo = (u64) bp->tx_desc_mapping & 0xffffffff;
4157
4158 bp->tx_prod = 0;
4159 bp->tx_cons = 0;
f4e418f7 4160 bp->hw_tx_cons = 0;
b6016b76 4161 bp->tx_prod_bseq = 0;
6aa20a22 4162
59b47d8a
MC
4163 cid = TX_CID;
4164 bp->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4165 bp->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4166
59b47d8a 4167 bnx2_init_tx_context(bp, cid);
b6016b76
MC
4168}
4169
/* Initialize the receive ring(s): compute buffer sizes from the MTU,
 * fill every descriptor, chain each ring page to the next (and the last
 * back to the first), program the RX context, pre-allocate skbs for the
 * configured ring size, and publish the initial producer index to the
 * hardware mailbox.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp)
{
	struct rx_bd *rxbd;
	int i;
	u16 prod, ring_prod;
	u32 val;

	/* 8 for CRC and VLAN */
	bp->rx_buf_use_size = bp->dev->mtu + ETH_HLEN + bp->rx_offset + 8;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;

	ring_prod = prod = bp->rx_prod = 0;
	bp->rx_cons = 0;
	bp->hw_rx_cons = 0;
	bp->rx_prod_bseq = 0;

	for (i = 0; i < bp->rx_max_ring; i++) {
		int j;

		rxbd = &bp->rx_desc_ring[i][0];
		for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
			rxbd->rx_bd_len = bp->rx_buf_use_size;
			rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
		}
		/* The last descriptor of each page chains to the next
		 * page; the final page chains back to page 0. */
		if (i == (bp->rx_max_ring - 1))
			j = 0;
		else
			j = i + 1;
		rxbd->rx_bd_haddr_hi = (u64) bp->rx_desc_mapping[j] >> 32;
		rxbd->rx_bd_haddr_lo = (u64) bp->rx_desc_mapping[j] &
				       0xffffffff;
	}

	/* Program the RX context type and the ring base address. */
	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_CTX_TYPE, val);

	val = (u64) bp->rx_desc_mapping[0] >> 32;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) bp->rx_desc_mapping[0] & 0xffffffff;
	CTX_WR(bp, GET_CID_ADDR(RX_CID), BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the ring with receive buffers; stop early if
	 * allocation fails. */
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, ring_prod) < 0) {
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	bp->rx_prod = prod;

	/* Publish the producer index and byte sequence to the hardware. */
	REG_WR16(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BDIDX, prod);

	REG_WR(bp, MB_RX_CID_ADDR + BNX2_L2CTX_HOST_BSEQ, bp->rx_prod_bseq);
}
4229
13daffa2
MC
4230static void
4231bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4232{
4233 u32 num_rings, max;
4234
4235 bp->rx_ring_size = size;
4236 num_rings = 1;
4237 while (size > MAX_RX_DESC_CNT) {
4238 size -= MAX_RX_DESC_CNT;
4239 num_rings++;
4240 }
4241 /* round to next power of 2 */
4242 max = MAX_RX_RINGS;
4243 while ((max & num_rings) == 0)
4244 max >>= 1;
4245
4246 if (num_rings != max)
4247 max <<= 1;
4248
4249 bp->rx_max_ring = max;
4250 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
4251}
4252
b6016b76
MC
/* Release every skb still held by the transmit ring, unmapping the DMA
 * for the linear part and each page fragment.  A transmitted skb
 * occupies 1 + nr_frags consecutive tx_buf slots, so the loop advances
 * by j + 1 after freeing one skb.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	if (bp->tx_buf_ring == NULL)
		return;

	for (i = 0; i < TX_DESC_CNT; ) {
		struct sw_bd *tx_buf = &bp->tx_buf_ring[i];
		struct sk_buff *skb = tx_buf->skb;
		int j, last;

		if (skb == NULL) {
			i++;
			continue;
		}

		/* Unmap the linear (header) portion. */
		pci_unmap_single(bp->pdev, pci_unmap_addr(tx_buf, mapping),
				 skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;

		/* Unmap each page fragment from the following slots. */
		last = skb_shinfo(skb)->nr_frags;
		for (j = 0; j < last; j++) {
			tx_buf = &bp->tx_buf_ring[i + j + 1];
			pci_unmap_page(bp->pdev,
				pci_unmap_addr(tx_buf, mapping),
				skb_shinfo(skb)->frags[j].size,
				PCI_DMA_TODEVICE);
		}
		dev_kfree_skb(skb);
		/* Skip past this skb's slots (1 linear + j fragments). */
		i += j + 1;
	}

}
4289
4290static void
4291bnx2_free_rx_skbs(struct bnx2 *bp)
4292{
4293 int i;
4294
4295 if (bp->rx_buf_ring == NULL)
4296 return;
4297
13daffa2 4298 for (i = 0; i < bp->rx_max_ring_idx; i++) {
b6016b76
MC
4299 struct sw_bd *rx_buf = &bp->rx_buf_ring[i];
4300 struct sk_buff *skb = rx_buf->skb;
4301
05d0f1cf 4302 if (skb == NULL)
b6016b76
MC
4303 continue;
4304
4305 pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
4306 bp->rx_buf_use_size, PCI_DMA_FROMDEVICE);
4307
4308 rx_buf->skb = NULL;
4309
745720e5 4310 dev_kfree_skb(skb);
b6016b76
MC
4311 }
4312}
4313
/* Release all socket buffers held by both the TX and RX rings. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
4320
4321static int
4322bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
4323{
4324 int rc;
4325
4326 rc = bnx2_reset_chip(bp, reset_code);
4327 bnx2_free_skbs(bp);
4328 if (rc)
4329 return rc;
4330
fba9fe91
MC
4331 if ((rc = bnx2_init_chip(bp)) != 0)
4332 return rc;
4333
b6016b76
MC
4334 bnx2_init_tx_ring(bp);
4335 bnx2_init_rx_ring(bp);
4336 return 0;
4337}
4338
4339static int
4340bnx2_init_nic(struct bnx2 *bp)
4341{
4342 int rc;
4343
4344 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
4345 return rc;
4346
80be4434 4347 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
4348 bnx2_init_phy(bp);
4349 bnx2_set_link(bp);
0d8a6571 4350 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
4351 return 0;
4352}
4353
4354static int
4355bnx2_test_registers(struct bnx2 *bp)
4356{
4357 int ret;
5bae30c9 4358 int i, is_5709;
f71e1309 4359 static const struct {
b6016b76
MC
4360 u16 offset;
4361 u16 flags;
5bae30c9 4362#define BNX2_FL_NOT_5709 1
b6016b76
MC
4363 u32 rw_mask;
4364 u32 ro_mask;
4365 } reg_tbl[] = {
4366 { 0x006c, 0, 0x00000000, 0x0000003f },
4367 { 0x0090, 0, 0xffffffff, 0x00000000 },
4368 { 0x0094, 0, 0x00000000, 0x00000000 },
4369
5bae30c9
MC
4370 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
4371 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4372 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4373 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
4374 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
4375 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4376 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
4377 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4378 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4379
4380 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4381 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
4382 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4383 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4384 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4385 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
4386
4387 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
4388 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
4389 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
4390
4391 { 0x1000, 0, 0x00000000, 0x00000001 },
4392 { 0x1004, 0, 0x00000000, 0x000f0001 },
b6016b76
MC
4393
4394 { 0x1408, 0, 0x01c00800, 0x00000000 },
4395 { 0x149c, 0, 0x8000ffff, 0x00000000 },
4396 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 4397 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
4398 { 0x14b0, 0, 0x00000002, 0x00000001 },
4399 { 0x14b8, 0, 0x00000000, 0x00000000 },
4400 { 0x14c0, 0, 0x00000000, 0x00000009 },
4401 { 0x14c4, 0, 0x00003fff, 0x00000000 },
4402 { 0x14cc, 0, 0x00000000, 0x00000001 },
4403 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
4404
4405 { 0x1800, 0, 0x00000000, 0x00000001 },
4406 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
4407
4408 { 0x2800, 0, 0x00000000, 0x00000001 },
4409 { 0x2804, 0, 0x00000000, 0x00003f01 },
4410 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
4411 { 0x2810, 0, 0xffff0000, 0x00000000 },
4412 { 0x2814, 0, 0xffff0000, 0x00000000 },
4413 { 0x2818, 0, 0xffff0000, 0x00000000 },
4414 { 0x281c, 0, 0xffff0000, 0x00000000 },
4415 { 0x2834, 0, 0xffffffff, 0x00000000 },
4416 { 0x2840, 0, 0x00000000, 0xffffffff },
4417 { 0x2844, 0, 0x00000000, 0xffffffff },
4418 { 0x2848, 0, 0xffffffff, 0x00000000 },
4419 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
4420
4421 { 0x2c00, 0, 0x00000000, 0x00000011 },
4422 { 0x2c04, 0, 0x00000000, 0x00030007 },
4423
b6016b76
MC
4424 { 0x3c00, 0, 0x00000000, 0x00000001 },
4425 { 0x3c04, 0, 0x00000000, 0x00070000 },
4426 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
4427 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
4428 { 0x3c10, 0, 0xffffffff, 0x00000000 },
4429 { 0x3c14, 0, 0x00000000, 0xffffffff },
4430 { 0x3c18, 0, 0x00000000, 0xffffffff },
4431 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
4432 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
4433
4434 { 0x5004, 0, 0x00000000, 0x0000007f },
4435 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 4436
b6016b76
MC
4437 { 0x5c00, 0, 0x00000000, 0x00000001 },
4438 { 0x5c04, 0, 0x00000000, 0x0003000f },
4439 { 0x5c08, 0, 0x00000003, 0x00000000 },
4440 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
4441 { 0x5c10, 0, 0x00000000, 0xffffffff },
4442 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
4443 { 0x5c84, 0, 0x00000000, 0x0000f333 },
4444 { 0x5c88, 0, 0x00000000, 0x00077373 },
4445 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
4446
4447 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
4448 { 0x680c, 0, 0xffffffff, 0x00000000 },
4449 { 0x6810, 0, 0xffffffff, 0x00000000 },
4450 { 0x6814, 0, 0xffffffff, 0x00000000 },
4451 { 0x6818, 0, 0xffffffff, 0x00000000 },
4452 { 0x681c, 0, 0xffffffff, 0x00000000 },
4453 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
4454 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
4455 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
4456 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
4457 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
4458 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
4459 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
4460 { 0x683c, 0, 0x0000ffff, 0x00000000 },
4461 { 0x6840, 0, 0x00000ff0, 0x00000000 },
4462 { 0x6844, 0, 0x00ffff00, 0x00000000 },
4463 { 0x684c, 0, 0xffffffff, 0x00000000 },
4464 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
4465 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
4466 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
4467 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
4468 { 0x6908, 0, 0x00000000, 0x0001ff0f },
4469 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
4470
4471 { 0xffff, 0, 0x00000000, 0x00000000 },
4472 };
4473
4474 ret = 0;
5bae30c9
MC
4475 is_5709 = 0;
4476 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4477 is_5709 = 1;
4478
b6016b76
MC
4479 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
4480 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
4481 u16 flags = reg_tbl[i].flags;
4482
4483 if (is_5709 && (flags & BNX2_FL_NOT_5709))
4484 continue;
b6016b76
MC
4485
4486 offset = (u32) reg_tbl[i].offset;
4487 rw_mask = reg_tbl[i].rw_mask;
4488 ro_mask = reg_tbl[i].ro_mask;
4489
14ab9b86 4490 save_val = readl(bp->regview + offset);
b6016b76 4491
14ab9b86 4492 writel(0, bp->regview + offset);
b6016b76 4493
14ab9b86 4494 val = readl(bp->regview + offset);
b6016b76
MC
4495 if ((val & rw_mask) != 0) {
4496 goto reg_test_err;
4497 }
4498
4499 if ((val & ro_mask) != (save_val & ro_mask)) {
4500 goto reg_test_err;
4501 }
4502
14ab9b86 4503 writel(0xffffffff, bp->regview + offset);
b6016b76 4504
14ab9b86 4505 val = readl(bp->regview + offset);
b6016b76
MC
4506 if ((val & rw_mask) != rw_mask) {
4507 goto reg_test_err;
4508 }
4509
4510 if ((val & ro_mask) != (save_val & ro_mask)) {
4511 goto reg_test_err;
4512 }
4513
14ab9b86 4514 writel(save_val, bp->regview + offset);
b6016b76
MC
4515 continue;
4516
4517reg_test_err:
14ab9b86 4518 writel(save_val, bp->regview + offset);
b6016b76
MC
4519 ret = -ENODEV;
4520 break;
4521 }
4522 return ret;
4523}
4524
4525static int
4526bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
4527{
f71e1309 4528 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
4529 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
4530 int i;
4531
4532 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
4533 u32 offset;
4534
4535 for (offset = 0; offset < size; offset += 4) {
4536
4537 REG_WR_IND(bp, start + offset, test_pattern[i]);
4538
4539 if (REG_RD_IND(bp, start + offset) !=
4540 test_pattern[i]) {
4541 return -ENODEV;
4542 }
4543 }
4544 }
4545 return 0;
4546}
4547
4548static int
4549bnx2_test_memory(struct bnx2 *bp)
4550{
4551 int ret = 0;
4552 int i;
5bae30c9 4553 static struct mem_entry {
b6016b76
MC
4554 u32 offset;
4555 u32 len;
5bae30c9 4556 } mem_tbl_5706[] = {
b6016b76 4557 { 0x60000, 0x4000 },
5b0c76ad 4558 { 0xa0000, 0x3000 },
b6016b76
MC
4559 { 0xe0000, 0x4000 },
4560 { 0x120000, 0x4000 },
4561 { 0x1a0000, 0x4000 },
4562 { 0x160000, 0x4000 },
4563 { 0xffffffff, 0 },
5bae30c9
MC
4564 },
4565 mem_tbl_5709[] = {
4566 { 0x60000, 0x4000 },
4567 { 0xa0000, 0x3000 },
4568 { 0xe0000, 0x4000 },
4569 { 0x120000, 0x4000 },
4570 { 0x1a0000, 0x4000 },
4571 { 0xffffffff, 0 },
b6016b76 4572 };
5bae30c9
MC
4573 struct mem_entry *mem_tbl;
4574
4575 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4576 mem_tbl = mem_tbl_5709;
4577 else
4578 mem_tbl = mem_tbl_5706;
b6016b76
MC
4579
4580 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
4581 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
4582 mem_tbl[i].len)) != 0) {
4583 return ret;
4584 }
4585 }
6aa20a22 4586
b6016b76
MC
4587 return ret;
4588}
4589
bc5a0690
MC
4590#define BNX2_MAC_LOOPBACK 0
4591#define BNX2_PHY_LOOPBACK 1
4592
b6016b76 4593static int
bc5a0690 4594bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
4595{
4596 unsigned int pkt_size, num_pkts, i;
4597 struct sk_buff *skb, *rx_skb;
4598 unsigned char *packet;
bc5a0690 4599 u16 rx_start_idx, rx_idx;
b6016b76
MC
4600 dma_addr_t map;
4601 struct tx_bd *txbd;
4602 struct sw_bd *rx_buf;
4603 struct l2_fhdr *rx_hdr;
4604 int ret = -ENODEV;
4605
bc5a0690
MC
4606 if (loopback_mode == BNX2_MAC_LOOPBACK) {
4607 bp->loopback = MAC_LOOPBACK;
4608 bnx2_set_mac_loopback(bp);
4609 }
4610 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
80be4434 4611 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
4612 bnx2_set_phy_loopback(bp);
4613 }
4614 else
4615 return -EINVAL;
b6016b76
MC
4616
4617 pkt_size = 1514;
932f3772 4618 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
4619 if (!skb)
4620 return -ENOMEM;
b6016b76 4621 packet = skb_put(skb, pkt_size);
6634292b 4622 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
4623 memset(packet + 6, 0x0, 8);
4624 for (i = 14; i < pkt_size; i++)
4625 packet[i] = (unsigned char) (i & 0xff);
4626
4627 map = pci_map_single(bp->pdev, skb->data, pkt_size,
4628 PCI_DMA_TODEVICE);
4629
bf5295bb
MC
4630 REG_WR(bp, BNX2_HC_COMMAND,
4631 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4632
b6016b76
MC
4633 REG_RD(bp, BNX2_HC_COMMAND);
4634
4635 udelay(5);
4636 rx_start_idx = bp->status_blk->status_rx_quick_consumer_index0;
4637
b6016b76
MC
4638 num_pkts = 0;
4639
bc5a0690 4640 txbd = &bp->tx_desc_ring[TX_RING_IDX(bp->tx_prod)];
b6016b76
MC
4641
4642 txbd->tx_bd_haddr_hi = (u64) map >> 32;
4643 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
4644 txbd->tx_bd_mss_nbytes = pkt_size;
4645 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
4646
4647 num_pkts++;
bc5a0690
MC
4648 bp->tx_prod = NEXT_TX_BD(bp->tx_prod);
4649 bp->tx_prod_bseq += pkt_size;
b6016b76 4650
234754d5
MC
4651 REG_WR16(bp, bp->tx_bidx_addr, bp->tx_prod);
4652 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
4653
4654 udelay(100);
4655
bf5295bb
MC
4656 REG_WR(bp, BNX2_HC_COMMAND,
4657 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
4658
b6016b76
MC
4659 REG_RD(bp, BNX2_HC_COMMAND);
4660
4661 udelay(5);
4662
4663 pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
745720e5 4664 dev_kfree_skb(skb);
b6016b76 4665
bc5a0690 4666 if (bp->status_blk->status_tx_quick_consumer_index0 != bp->tx_prod) {
b6016b76
MC
4667 goto loopback_test_done;
4668 }
4669
4670 rx_idx = bp->status_blk->status_rx_quick_consumer_index0;
4671 if (rx_idx != rx_start_idx + num_pkts) {
4672 goto loopback_test_done;
4673 }
4674
4675 rx_buf = &bp->rx_buf_ring[rx_start_idx];
4676 rx_skb = rx_buf->skb;
4677
4678 rx_hdr = (struct l2_fhdr *) rx_skb->data;
4679 skb_reserve(rx_skb, bp->rx_offset);
4680
4681 pci_dma_sync_single_for_cpu(bp->pdev,
4682 pci_unmap_addr(rx_buf, mapping),
4683 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4684
ade2bfe7 4685 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
4686 (L2_FHDR_ERRORS_BAD_CRC |
4687 L2_FHDR_ERRORS_PHY_DECODE |
4688 L2_FHDR_ERRORS_ALIGNMENT |
4689 L2_FHDR_ERRORS_TOO_SHORT |
4690 L2_FHDR_ERRORS_GIANT_FRAME)) {
4691
4692 goto loopback_test_done;
4693 }
4694
4695 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
4696 goto loopback_test_done;
4697 }
4698
4699 for (i = 14; i < pkt_size; i++) {
4700 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
4701 goto loopback_test_done;
4702 }
4703 }
4704
4705 ret = 0;
4706
4707loopback_test_done:
4708 bp->loopback = 0;
4709 return ret;
4710}
4711
bc5a0690
MC
4712#define BNX2_MAC_LOOPBACK_FAILED 1
4713#define BNX2_PHY_LOOPBACK_FAILED 2
4714#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
4715 BNX2_PHY_LOOPBACK_FAILED)
4716
4717static int
4718bnx2_test_loopback(struct bnx2 *bp)
4719{
4720 int rc = 0;
4721
4722 if (!netif_running(bp->dev))
4723 return BNX2_LOOPBACK_FAILED;
4724
4725 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
4726 spin_lock_bh(&bp->phy_lock);
4727 bnx2_init_phy(bp);
4728 spin_unlock_bh(&bp->phy_lock);
4729 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
4730 rc |= BNX2_MAC_LOOPBACK_FAILED;
4731 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
4732 rc |= BNX2_PHY_LOOPBACK_FAILED;
4733 return rc;
4734}
4735
b6016b76
MC
4736#define NVRAM_SIZE 0x200
4737#define CRC32_RESIDUAL 0xdebb20e3
4738
4739static int
4740bnx2_test_nvram(struct bnx2 *bp)
4741{
4742 u32 buf[NVRAM_SIZE / 4];
4743 u8 *data = (u8 *) buf;
4744 int rc = 0;
4745 u32 magic, csum;
4746
4747 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
4748 goto test_nvram_done;
4749
4750 magic = be32_to_cpu(buf[0]);
4751 if (magic != 0x669955aa) {
4752 rc = -ENODEV;
4753 goto test_nvram_done;
4754 }
4755
4756 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
4757 goto test_nvram_done;
4758
4759 csum = ether_crc_le(0x100, data);
4760 if (csum != CRC32_RESIDUAL) {
4761 rc = -ENODEV;
4762 goto test_nvram_done;
4763 }
4764
4765 csum = ether_crc_le(0x100, data + 0x100);
4766 if (csum != CRC32_RESIDUAL) {
4767 rc = -ENODEV;
4768 }
4769
4770test_nvram_done:
4771 return rc;
4772}
4773
4774static int
4775bnx2_test_link(struct bnx2 *bp)
4776{
4777 u32 bmsr;
4778
c770a65c 4779 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
4780 bnx2_enable_bmsr1(bp);
4781 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4782 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
4783 bnx2_disable_bmsr1(bp);
c770a65c 4784 spin_unlock_bh(&bp->phy_lock);
6aa20a22 4785
b6016b76
MC
4786 if (bmsr & BMSR_LSTATUS) {
4787 return 0;
4788 }
4789 return -ENODEV;
4790}
4791
4792static int
4793bnx2_test_intr(struct bnx2 *bp)
4794{
4795 int i;
b6016b76
MC
4796 u16 status_idx;
4797
4798 if (!netif_running(bp->dev))
4799 return -ENODEV;
4800
4801 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
4802
4803 /* This register is not touched during run-time. */
bf5295bb 4804 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
4805 REG_RD(bp, BNX2_HC_COMMAND);
4806
4807 for (i = 0; i < 10; i++) {
4808 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
4809 status_idx) {
4810
4811 break;
4812 }
4813
4814 msleep_interruptible(10);
4815 }
4816 if (i < 10)
4817 return 0;
4818
4819 return -ENODEV;
4820}
4821
4822static void
48b01e2d 4823bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 4824{
48b01e2d
MC
4825 spin_lock(&bp->phy_lock);
4826 if (bp->serdes_an_pending)
4827 bp->serdes_an_pending--;
4828 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4829 u32 bmcr;
b6016b76 4830
48b01e2d 4831 bp->current_interval = bp->timer_interval;
cd339a0e 4832
ca58c3af 4833 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 4834
48b01e2d
MC
4835 if (bmcr & BMCR_ANENABLE) {
4836 u32 phy1, phy2;
b6016b76 4837
48b01e2d
MC
4838 bnx2_write_phy(bp, 0x1c, 0x7c00);
4839 bnx2_read_phy(bp, 0x1c, &phy1);
cea94db9 4840
48b01e2d
MC
4841 bnx2_write_phy(bp, 0x17, 0x0f01);
4842 bnx2_read_phy(bp, 0x15, &phy2);
4843 bnx2_write_phy(bp, 0x17, 0x0f01);
4844 bnx2_read_phy(bp, 0x15, &phy2);
b6016b76 4845
48b01e2d
MC
4846 if ((phy1 & 0x10) && /* SIGNAL DETECT */
4847 !(phy2 & 0x20)) { /* no CONFIG */
4848
4849 bmcr &= ~BMCR_ANENABLE;
4850 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 4851 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
48b01e2d
MC
4852 bp->phy_flags |= PHY_PARALLEL_DETECT_FLAG;
4853 }
b6016b76 4854 }
48b01e2d
MC
4855 }
4856 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
4857 (bp->phy_flags & PHY_PARALLEL_DETECT_FLAG)) {
4858 u32 phy2;
b6016b76 4859
48b01e2d
MC
4860 bnx2_write_phy(bp, 0x17, 0x0f01);
4861 bnx2_read_phy(bp, 0x15, &phy2);
4862 if (phy2 & 0x20) {
4863 u32 bmcr;
cd339a0e 4864
ca58c3af 4865 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 4866 bmcr |= BMCR_ANENABLE;
ca58c3af 4867 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 4868
48b01e2d
MC
4869 bp->phy_flags &= ~PHY_PARALLEL_DETECT_FLAG;
4870 }
4871 } else
4872 bp->current_interval = bp->timer_interval;
b6016b76 4873
48b01e2d
MC
4874 spin_unlock(&bp->phy_lock);
4875}
b6016b76 4876
f8dd064e
MC
4877static void
4878bnx2_5708_serdes_timer(struct bnx2 *bp)
4879{
0d8a6571
MC
4880 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
4881 return;
4882
f8dd064e
MC
4883 if ((bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) == 0) {
4884 bp->serdes_an_pending = 0;
4885 return;
4886 }
b6016b76 4887
f8dd064e
MC
4888 spin_lock(&bp->phy_lock);
4889 if (bp->serdes_an_pending)
4890 bp->serdes_an_pending--;
4891 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
4892 u32 bmcr;
b6016b76 4893
ca58c3af 4894 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 4895 if (bmcr & BMCR_ANENABLE) {
605a9e20 4896 bnx2_enable_forced_2g5(bp);
f8dd064e
MC
4897 bp->current_interval = SERDES_FORCED_TIMEOUT;
4898 } else {
605a9e20 4899 bnx2_disable_forced_2g5(bp);
f8dd064e
MC
4900 bp->serdes_an_pending = 2;
4901 bp->current_interval = bp->timer_interval;
b6016b76 4902 }
b6016b76 4903
f8dd064e
MC
4904 } else
4905 bp->current_interval = bp->timer_interval;
b6016b76 4906
f8dd064e
MC
4907 spin_unlock(&bp->phy_lock);
4908}
4909
48b01e2d
MC
4910static void
4911bnx2_timer(unsigned long data)
4912{
4913 struct bnx2 *bp = (struct bnx2 *) data;
4914 u32 msg;
b6016b76 4915
48b01e2d
MC
4916 if (!netif_running(bp->dev))
4917 return;
b6016b76 4918
48b01e2d
MC
4919 if (atomic_read(&bp->intr_sem) != 0)
4920 goto bnx2_restart_timer;
b6016b76 4921
48b01e2d
MC
4922 msg = (u32) ++bp->fw_drv_pulse_wr_seq;
4923 REG_WR_IND(bp, bp->shmem_base + BNX2_DRV_PULSE_MB, msg);
b6016b76 4924
48b01e2d 4925 bp->stats_blk->stat_FwRxDrop = REG_RD_IND(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 4926
02537b06
MC
4927 /* workaround occasional corrupted counters */
4928 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
4929 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
4930 BNX2_HC_COMMAND_STATS_NOW);
4931
f8dd064e
MC
4932 if (bp->phy_flags & PHY_SERDES_FLAG) {
4933 if (CHIP_NUM(bp) == CHIP_NUM_5706)
4934 bnx2_5706_serdes_timer(bp);
27a005b8 4935 else
f8dd064e 4936 bnx2_5708_serdes_timer(bp);
b6016b76
MC
4937 }
4938
4939bnx2_restart_timer:
cd339a0e 4940 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
4941}
4942
8e6a72c4
MC
4943static int
4944bnx2_request_irq(struct bnx2 *bp)
4945{
4946 struct net_device *dev = bp->dev;
4947 int rc = 0;
4948
4949 if (bp->flags & USING_MSI_FLAG) {
4950 irq_handler_t fn = bnx2_msi;
4951
4952 if (bp->flags & ONE_SHOT_MSI_FLAG)
4953 fn = bnx2_msi_1shot;
4954
4955 rc = request_irq(bp->pdev->irq, fn, 0, dev->name, dev);
4956 } else
4957 rc = request_irq(bp->pdev->irq, bnx2_interrupt,
4958 IRQF_SHARED, dev->name, dev);
4959 return rc;
4960}
4961
4962static void
4963bnx2_free_irq(struct bnx2 *bp)
4964{
4965 struct net_device *dev = bp->dev;
4966
4967 if (bp->flags & USING_MSI_FLAG) {
4968 free_irq(bp->pdev->irq, dev);
4969 pci_disable_msi(bp->pdev);
4970 bp->flags &= ~(USING_MSI_FLAG | ONE_SHOT_MSI_FLAG);
4971 } else
4972 free_irq(bp->pdev->irq, dev);
4973}
4974
b6016b76
MC
4975/* Called with rtnl_lock */
4976static int
4977bnx2_open(struct net_device *dev)
4978{
972ec0d4 4979 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
4980 int rc;
4981
1b2f922f
MC
4982 netif_carrier_off(dev);
4983
829ca9a3 4984 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
4985 bnx2_disable_int(bp);
4986
4987 rc = bnx2_alloc_mem(bp);
4988 if (rc)
4989 return rc;
4990
8e6a72c4 4991 if ((bp->flags & MSI_CAP_FLAG) && !disable_msi) {
b6016b76
MC
4992 if (pci_enable_msi(bp->pdev) == 0) {
4993 bp->flags |= USING_MSI_FLAG;
8e6a72c4
MC
4994 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4995 bp->flags |= ONE_SHOT_MSI_FLAG;
b6016b76 4996 }
b6016b76 4997 }
8e6a72c4
MC
4998 rc = bnx2_request_irq(bp);
4999
b6016b76
MC
5000 if (rc) {
5001 bnx2_free_mem(bp);
5002 return rc;
5003 }
5004
5005 rc = bnx2_init_nic(bp);
5006
5007 if (rc) {
8e6a72c4 5008 bnx2_free_irq(bp);
b6016b76
MC
5009 bnx2_free_skbs(bp);
5010 bnx2_free_mem(bp);
5011 return rc;
5012 }
6aa20a22 5013
cd339a0e 5014 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5015
5016 atomic_set(&bp->intr_sem, 0);
5017
5018 bnx2_enable_int(bp);
5019
5020 if (bp->flags & USING_MSI_FLAG) {
5021 /* Test MSI to make sure it is working
5022 * If MSI test fails, go back to INTx mode
5023 */
5024 if (bnx2_test_intr(bp) != 0) {
5025 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5026 " using MSI, switching to INTx mode. Please"
5027 " report this failure to the PCI maintainer"
5028 " and include system chipset information.\n",
5029 bp->dev->name);
5030
5031 bnx2_disable_int(bp);
8e6a72c4 5032 bnx2_free_irq(bp);
b6016b76
MC
5033
5034 rc = bnx2_init_nic(bp);
5035
8e6a72c4
MC
5036 if (!rc)
5037 rc = bnx2_request_irq(bp);
5038
b6016b76
MC
5039 if (rc) {
5040 bnx2_free_skbs(bp);
5041 bnx2_free_mem(bp);
5042 del_timer_sync(&bp->timer);
5043 return rc;
5044 }
5045 bnx2_enable_int(bp);
5046 }
5047 }
5048 if (bp->flags & USING_MSI_FLAG) {
5049 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
5050 }
5051
5052 netif_start_queue(dev);
5053
5054 return 0;
5055}
5056
5057static void
c4028958 5058bnx2_reset_task(struct work_struct *work)
b6016b76 5059{
c4028958 5060 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 5061
afdc08b9
MC
5062 if (!netif_running(bp->dev))
5063 return;
5064
5065 bp->in_reset_task = 1;
b6016b76
MC
5066 bnx2_netif_stop(bp);
5067
5068 bnx2_init_nic(bp);
5069
5070 atomic_set(&bp->intr_sem, 1);
5071 bnx2_netif_start(bp);
afdc08b9 5072 bp->in_reset_task = 0;
b6016b76
MC
5073}
5074
5075static void
5076bnx2_tx_timeout(struct net_device *dev)
5077{
972ec0d4 5078 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5079
5080 /* This allows the netif to be shutdown gracefully before resetting */
5081 schedule_work(&bp->reset_task);
5082}
5083
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce the interface while the rx filtering mode changes. */
	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);

	bnx2_netif_start(bp);
}
#endif
5099
932ff279 5100/* Called with netif_tx_lock.
2f8af120
MC
5101 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
5102 * netif_wake_queue().
b6016b76
MC
5103 */
5104static int
5105bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
5106{
972ec0d4 5107 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5108 dma_addr_t mapping;
5109 struct tx_bd *txbd;
5110 struct sw_bd *tx_buf;
5111 u32 len, vlan_tag_flags, last_frag, mss;
5112 u16 prod, ring_prod;
5113 int i;
5114
e89bbf10 5115 if (unlikely(bnx2_tx_avail(bp) < (skb_shinfo(skb)->nr_frags + 1))) {
b6016b76
MC
5116 netif_stop_queue(dev);
5117 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
5118 dev->name);
5119
5120 return NETDEV_TX_BUSY;
5121 }
5122 len = skb_headlen(skb);
5123 prod = bp->tx_prod;
5124 ring_prod = TX_RING_IDX(prod);
5125
5126 vlan_tag_flags = 0;
84fa7933 5127 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
5128 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
5129 }
5130
5131 if (bp->vlgrp != 0 && vlan_tx_tag_present(skb)) {
5132 vlan_tag_flags |=
5133 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
5134 }
fde82055 5135 if ((mss = skb_shinfo(skb)->gso_size)) {
b6016b76 5136 u32 tcp_opt_len, ip_tcp_len;
eddc9ec5 5137 struct iphdr *iph;
b6016b76 5138
b6016b76
MC
5139 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
5140
4666f87a
MC
5141 tcp_opt_len = tcp_optlen(skb);
5142
5143 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
5144 u32 tcp_off = skb_transport_offset(skb) -
5145 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 5146
4666f87a
MC
5147 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
5148 TX_BD_FLAGS_SW_FLAGS;
5149 if (likely(tcp_off == 0))
5150 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
5151 else {
5152 tcp_off >>= 3;
5153 vlan_tag_flags |= ((tcp_off & 0x3) <<
5154 TX_BD_FLAGS_TCP6_OFF0_SHL) |
5155 ((tcp_off & 0x10) <<
5156 TX_BD_FLAGS_TCP6_OFF4_SHL);
5157 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
5158 }
5159 } else {
5160 if (skb_header_cloned(skb) &&
5161 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
5162 dev_kfree_skb(skb);
5163 return NETDEV_TX_OK;
5164 }
b6016b76 5165
4666f87a
MC
5166 ip_tcp_len = ip_hdrlen(skb) + sizeof(struct tcphdr);
5167
5168 iph = ip_hdr(skb);
5169 iph->check = 0;
5170 iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
5171 tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
5172 iph->daddr, 0,
5173 IPPROTO_TCP,
5174 0);
5175 if (tcp_opt_len || (iph->ihl > 5)) {
5176 vlan_tag_flags |= ((iph->ihl - 5) +
5177 (tcp_opt_len >> 2)) << 8;
5178 }
b6016b76 5179 }
4666f87a 5180 } else
b6016b76 5181 mss = 0;
b6016b76
MC
5182
5183 mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6aa20a22 5184
b6016b76
MC
5185 tx_buf = &bp->tx_buf_ring[ring_prod];
5186 tx_buf->skb = skb;
5187 pci_unmap_addr_set(tx_buf, mapping, mapping);
5188
5189 txbd = &bp->tx_desc_ring[ring_prod];
5190
5191 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5192 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5193 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5194 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
5195
5196 last_frag = skb_shinfo(skb)->nr_frags;
5197
5198 for (i = 0; i < last_frag; i++) {
5199 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
5200
5201 prod = NEXT_TX_BD(prod);
5202 ring_prod = TX_RING_IDX(prod);
5203 txbd = &bp->tx_desc_ring[ring_prod];
5204
5205 len = frag->size;
5206 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
5207 len, PCI_DMA_TODEVICE);
5208 pci_unmap_addr_set(&bp->tx_buf_ring[ring_prod],
5209 mapping, mapping);
5210
5211 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
5212 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
5213 txbd->tx_bd_mss_nbytes = len | (mss << 16);
5214 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
5215
5216 }
5217 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
5218
5219 prod = NEXT_TX_BD(prod);
5220 bp->tx_prod_bseq += skb->len;
5221
234754d5
MC
5222 REG_WR16(bp, bp->tx_bidx_addr, prod);
5223 REG_WR(bp, bp->tx_bseq_addr, bp->tx_prod_bseq);
b6016b76
MC
5224
5225 mmiowb();
5226
5227 bp->tx_prod = prod;
5228 dev->trans_start = jiffies;
5229
e89bbf10 5230 if (unlikely(bnx2_tx_avail(bp) <= MAX_SKB_FRAGS)) {
e89bbf10 5231 netif_stop_queue(dev);
2f8af120 5232 if (bnx2_tx_avail(bp) > bp->tx_wake_thresh)
e89bbf10 5233 netif_wake_queue(dev);
b6016b76
MC
5234 }
5235
5236 return NETDEV_TX_OK;
5237}
5238
5239/* Called with rtnl_lock */
5240static int
5241bnx2_close(struct net_device *dev)
5242{
972ec0d4 5243 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5244 u32 reset_code;
5245
afdc08b9
MC
5246 /* Calling flush_scheduled_work() may deadlock because
5247 * linkwatch_event() may be on the workqueue and it will try to get
5248 * the rtnl_lock which we are holding.
5249 */
5250 while (bp->in_reset_task)
5251 msleep(1);
5252
b6016b76
MC
5253 bnx2_netif_stop(bp);
5254 del_timer_sync(&bp->timer);
dda1e390 5255 if (bp->flags & NO_WOL_FLAG)
6c4f095e 5256 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 5257 else if (bp->wol)
b6016b76
MC
5258 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5259 else
5260 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5261 bnx2_reset_chip(bp, reset_code);
8e6a72c4 5262 bnx2_free_irq(bp);
b6016b76
MC
5263 bnx2_free_skbs(bp);
5264 bnx2_free_mem(bp);
5265 bp->link_up = 0;
5266 netif_carrier_off(bp->dev);
829ca9a3 5267 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
5268 return 0;
5269}
5270
5271#define GET_NET_STATS64(ctr) \
5272 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
5273 (unsigned long) (ctr##_lo)
5274
5275#define GET_NET_STATS32(ctr) \
5276 (ctr##_lo)
5277
5278#if (BITS_PER_LONG == 64)
5279#define GET_NET_STATS GET_NET_STATS64
5280#else
5281#define GET_NET_STATS GET_NET_STATS32
5282#endif
5283
5284static struct net_device_stats *
5285bnx2_get_stats(struct net_device *dev)
5286{
972ec0d4 5287 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5288 struct statistics_block *stats_blk = bp->stats_blk;
5289 struct net_device_stats *net_stats = &bp->net_stats;
5290
5291 if (bp->stats_blk == NULL) {
5292 return net_stats;
5293 }
5294 net_stats->rx_packets =
5295 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
5296 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
5297 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
5298
5299 net_stats->tx_packets =
5300 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
5301 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
5302 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
5303
5304 net_stats->rx_bytes =
5305 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
5306
5307 net_stats->tx_bytes =
5308 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
5309
6aa20a22 5310 net_stats->multicast =
b6016b76
MC
5311 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
5312
6aa20a22 5313 net_stats->collisions =
b6016b76
MC
5314 (unsigned long) stats_blk->stat_EtherStatsCollisions;
5315
6aa20a22 5316 net_stats->rx_length_errors =
b6016b76
MC
5317 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
5318 stats_blk->stat_EtherStatsOverrsizePkts);
5319
6aa20a22 5320 net_stats->rx_over_errors =
b6016b76
MC
5321 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
5322
6aa20a22 5323 net_stats->rx_frame_errors =
b6016b76
MC
5324 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
5325
6aa20a22 5326 net_stats->rx_crc_errors =
b6016b76
MC
5327 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
5328
5329 net_stats->rx_errors = net_stats->rx_length_errors +
5330 net_stats->rx_over_errors + net_stats->rx_frame_errors +
5331 net_stats->rx_crc_errors;
5332
5333 net_stats->tx_aborted_errors =
5334 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
5335 stats_blk->stat_Dot3StatsLateCollisions);
5336
5b0c76ad
MC
5337 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
5338 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
5339 net_stats->tx_carrier_errors = 0;
5340 else {
5341 net_stats->tx_carrier_errors =
5342 (unsigned long)
5343 stats_blk->stat_Dot3StatsCarrierSenseErrors;
5344 }
5345
5346 net_stats->tx_errors =
6aa20a22 5347 (unsigned long)
b6016b76
MC
5348 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
5349 +
5350 net_stats->tx_aborted_errors +
5351 net_stats->tx_carrier_errors;
5352
cea94db9
MC
5353 net_stats->rx_missed_errors =
5354 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
5355 stats_blk->stat_FwRxDrop);
5356
b6016b76
MC
5357 return net_stats;
5358}
5359
5360/* All ethtool functions called with rtnl_lock */
5361
5362static int
5363bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5364{
972ec0d4 5365 struct bnx2 *bp = netdev_priv(dev);
7b6b8347 5366 int support_serdes = 0, support_copper = 0;
b6016b76
MC
5367
5368 cmd->supported = SUPPORTED_Autoneg;
7b6b8347
MC
5369 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5370 support_serdes = 1;
5371 support_copper = 1;
5372 } else if (bp->phy_port == PORT_FIBRE)
5373 support_serdes = 1;
5374 else
5375 support_copper = 1;
5376
5377 if (support_serdes) {
b6016b76
MC
5378 cmd->supported |= SUPPORTED_1000baseT_Full |
5379 SUPPORTED_FIBRE;
605a9e20
MC
5380 if (bp->phy_flags & PHY_2_5G_CAPABLE_FLAG)
5381 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76 5382
b6016b76 5383 }
7b6b8347 5384 if (support_copper) {
b6016b76
MC
5385 cmd->supported |= SUPPORTED_10baseT_Half |
5386 SUPPORTED_10baseT_Full |
5387 SUPPORTED_100baseT_Half |
5388 SUPPORTED_100baseT_Full |
5389 SUPPORTED_1000baseT_Full |
5390 SUPPORTED_TP;
5391
b6016b76
MC
5392 }
5393
7b6b8347
MC
5394 spin_lock_bh(&bp->phy_lock);
5395 cmd->port = bp->phy_port;
b6016b76
MC
5396 cmd->advertising = bp->advertising;
5397
5398 if (bp->autoneg & AUTONEG_SPEED) {
5399 cmd->autoneg = AUTONEG_ENABLE;
5400 }
5401 else {
5402 cmd->autoneg = AUTONEG_DISABLE;
5403 }
5404
5405 if (netif_carrier_ok(dev)) {
5406 cmd->speed = bp->line_speed;
5407 cmd->duplex = bp->duplex;
5408 }
5409 else {
5410 cmd->speed = -1;
5411 cmd->duplex = -1;
5412 }
7b6b8347 5413 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5414
5415 cmd->transceiver = XCVR_INTERNAL;
5416 cmd->phy_address = bp->phy_addr;
5417
5418 return 0;
5419}
6aa20a22 5420
b6016b76
MC
5421static int
5422bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5423{
972ec0d4 5424 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5425 u8 autoneg = bp->autoneg;
5426 u8 req_duplex = bp->req_duplex;
5427 u16 req_line_speed = bp->req_line_speed;
5428 u32 advertising = bp->advertising;
7b6b8347
MC
5429 int err = -EINVAL;
5430
5431 spin_lock_bh(&bp->phy_lock);
5432
5433 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
5434 goto err_out_unlock;
5435
5436 if (cmd->port != bp->phy_port && !(bp->phy_flags & REMOTE_PHY_CAP_FLAG))
5437 goto err_out_unlock;
b6016b76
MC
5438
5439 if (cmd->autoneg == AUTONEG_ENABLE) {
5440 autoneg |= AUTONEG_SPEED;
5441
6aa20a22 5442 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5443
5444 /* allow advertising 1 speed */
5445 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
5446 (cmd->advertising == ADVERTISED_10baseT_Full) ||
5447 (cmd->advertising == ADVERTISED_100baseT_Half) ||
5448 (cmd->advertising == ADVERTISED_100baseT_Full)) {
5449
7b6b8347
MC
5450 if (cmd->port == PORT_FIBRE)
5451 goto err_out_unlock;
b6016b76
MC
5452
5453 advertising = cmd->advertising;
5454
27a005b8 5455 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
7b6b8347
MC
5456 if (!(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG) ||
5457 (cmd->port == PORT_TP))
5458 goto err_out_unlock;
5459 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
b6016b76 5460 advertising = cmd->advertising;
7b6b8347
MC
5461 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
5462 goto err_out_unlock;
b6016b76 5463 else {
7b6b8347 5464 if (cmd->port == PORT_FIBRE)
b6016b76 5465 advertising = ETHTOOL_ALL_FIBRE_SPEED;
7b6b8347 5466 else
b6016b76 5467 advertising = ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
5468 }
5469 advertising |= ADVERTISED_Autoneg;
5470 }
5471 else {
7b6b8347 5472 if (cmd->port == PORT_FIBRE) {
80be4434
MC
5473 if ((cmd->speed != SPEED_1000 &&
5474 cmd->speed != SPEED_2500) ||
5475 (cmd->duplex != DUPLEX_FULL))
7b6b8347 5476 goto err_out_unlock;
80be4434
MC
5477
5478 if (cmd->speed == SPEED_2500 &&
5479 !(bp->phy_flags & PHY_2_5G_CAPABLE_FLAG))
7b6b8347 5480 goto err_out_unlock;
b6016b76 5481 }
7b6b8347
MC
5482 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
5483 goto err_out_unlock;
5484
b6016b76
MC
5485 autoneg &= ~AUTONEG_SPEED;
5486 req_line_speed = cmd->speed;
5487 req_duplex = cmd->duplex;
5488 advertising = 0;
5489 }
5490
5491 bp->autoneg = autoneg;
5492 bp->advertising = advertising;
5493 bp->req_line_speed = req_line_speed;
5494 bp->req_duplex = req_duplex;
5495
7b6b8347 5496 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 5497
7b6b8347 5498err_out_unlock:
c770a65c 5499 spin_unlock_bh(&bp->phy_lock);
b6016b76 5500
7b6b8347 5501 return err;
b6016b76
MC
5502}
5503
5504static void
5505bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5506{
972ec0d4 5507 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5508
5509 strcpy(info->driver, DRV_MODULE_NAME);
5510 strcpy(info->version, DRV_MODULE_VERSION);
5511 strcpy(info->bus_info, pci_name(bp->pdev));
5512 info->fw_version[0] = ((bp->fw_ver & 0xff000000) >> 24) + '0';
5513 info->fw_version[2] = ((bp->fw_ver & 0xff0000) >> 16) + '0';
5514 info->fw_version[4] = ((bp->fw_ver & 0xff00) >> 8) + '0';
206cc83c
MC
5515 info->fw_version[1] = info->fw_version[3] = '.';
5516 info->fw_version[5] = 0;
b6016b76
MC
5517}
5518
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len handler. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
5526
5527static void
5528bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
5529{
5530 u32 *p = _p, i, offset;
5531 u8 *orig_p = _p;
5532 struct bnx2 *bp = netdev_priv(dev);
5533 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
5534 0x0800, 0x0880, 0x0c00, 0x0c10,
5535 0x0c30, 0x0d08, 0x1000, 0x101c,
5536 0x1040, 0x1048, 0x1080, 0x10a4,
5537 0x1400, 0x1490, 0x1498, 0x14f0,
5538 0x1500, 0x155c, 0x1580, 0x15dc,
5539 0x1600, 0x1658, 0x1680, 0x16d8,
5540 0x1800, 0x1820, 0x1840, 0x1854,
5541 0x1880, 0x1894, 0x1900, 0x1984,
5542 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
5543 0x1c80, 0x1c94, 0x1d00, 0x1d84,
5544 0x2000, 0x2030, 0x23c0, 0x2400,
5545 0x2800, 0x2820, 0x2830, 0x2850,
5546 0x2b40, 0x2c10, 0x2fc0, 0x3058,
5547 0x3c00, 0x3c94, 0x4000, 0x4010,
5548 0x4080, 0x4090, 0x43c0, 0x4458,
5549 0x4c00, 0x4c18, 0x4c40, 0x4c54,
5550 0x4fc0, 0x5010, 0x53c0, 0x5444,
5551 0x5c00, 0x5c18, 0x5c80, 0x5c90,
5552 0x5fc0, 0x6000, 0x6400, 0x6428,
5553 0x6800, 0x6848, 0x684c, 0x6860,
5554 0x6888, 0x6910, 0x8000 };
5555
5556 regs->version = 0;
5557
5558 memset(p, 0, BNX2_REGDUMP_LEN);
5559
5560 if (!netif_running(bp->dev))
5561 return;
5562
5563 i = 0;
5564 offset = reg_boundaries[0];
5565 p += offset;
5566 while (offset < BNX2_REGDUMP_LEN) {
5567 *p++ = REG_RD(bp, offset);
5568 offset += 4;
5569 if (offset == reg_boundaries[i + 1]) {
5570 offset = reg_boundaries[i + 2];
5571 p = (u32 *) (orig_p + offset);
5572 i += 2;
5573 }
5574 }
5575}
5576
b6016b76
MC
5577static void
5578bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5579{
972ec0d4 5580 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5581
5582 if (bp->flags & NO_WOL_FLAG) {
5583 wol->supported = 0;
5584 wol->wolopts = 0;
5585 }
5586 else {
5587 wol->supported = WAKE_MAGIC;
5588 if (bp->wol)
5589 wol->wolopts = WAKE_MAGIC;
5590 else
5591 wol->wolopts = 0;
5592 }
5593 memset(&wol->sopass, 0, sizeof(wol->sopass));
5594}
5595
5596static int
5597bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5598{
972ec0d4 5599 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5600
5601 if (wol->wolopts & ~WAKE_MAGIC)
5602 return -EINVAL;
5603
5604 if (wol->wolopts & WAKE_MAGIC) {
5605 if (bp->flags & NO_WOL_FLAG)
5606 return -EINVAL;
5607
5608 bp->wol = 1;
5609 }
5610 else {
5611 bp->wol = 0;
5612 }
5613 return 0;
5614}
5615
5616static int
5617bnx2_nway_reset(struct net_device *dev)
5618{
972ec0d4 5619 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5620 u32 bmcr;
5621
5622 if (!(bp->autoneg & AUTONEG_SPEED)) {
5623 return -EINVAL;
5624 }
5625
c770a65c 5626 spin_lock_bh(&bp->phy_lock);
b6016b76 5627
7b6b8347
MC
5628 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG) {
5629 int rc;
5630
5631 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
5632 spin_unlock_bh(&bp->phy_lock);
5633 return rc;
5634 }
5635
b6016b76
MC
5636 /* Force a link down visible on the other side */
5637 if (bp->phy_flags & PHY_SERDES_FLAG) {
ca58c3af 5638 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 5639 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5640
5641 msleep(20);
5642
c770a65c 5643 spin_lock_bh(&bp->phy_lock);
f8dd064e
MC
5644
5645 bp->current_interval = SERDES_AN_TIMEOUT;
5646 bp->serdes_an_pending = 1;
5647 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5648 }
5649
ca58c3af 5650 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5651 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 5652 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 5653
c770a65c 5654 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5655
5656 return 0;
5657}
5658
5659static int
5660bnx2_get_eeprom_len(struct net_device *dev)
5661{
972ec0d4 5662 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5663
1122db71 5664 if (bp->flash_info == NULL)
b6016b76
MC
5665 return 0;
5666
1122db71 5667 return (int) bp->flash_size;
b6016b76
MC
5668}
5669
5670static int
5671bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5672 u8 *eebuf)
5673{
972ec0d4 5674 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5675 int rc;
5676
1064e944 5677 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
5678
5679 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
5680
5681 return rc;
5682}
5683
5684static int
5685bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
5686 u8 *eebuf)
5687{
972ec0d4 5688 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5689 int rc;
5690
1064e944 5691 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
5692
5693 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
5694
5695 return rc;
5696}
5697
5698static int
5699bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5700{
972ec0d4 5701 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5702
5703 memset(coal, 0, sizeof(struct ethtool_coalesce));
5704
5705 coal->rx_coalesce_usecs = bp->rx_ticks;
5706 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
5707 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
5708 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
5709
5710 coal->tx_coalesce_usecs = bp->tx_ticks;
5711 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
5712 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
5713 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
5714
5715 coal->stats_block_coalesce_usecs = bp->stats_ticks;
5716
5717 return 0;
5718}
5719
5720static int
5721bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
5722{
972ec0d4 5723 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5724
5725 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
5726 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
5727
6aa20a22 5728 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
5729 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
5730
5731 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
5732 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
5733
5734 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
5735 if (bp->rx_quick_cons_trip_int > 0xff)
5736 bp->rx_quick_cons_trip_int = 0xff;
5737
5738 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
5739 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
5740
5741 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
5742 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
5743
5744 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
5745 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
5746
5747 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
5748 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
5749 0xff;
5750
5751 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
5752 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
5753 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
5754 bp->stats_ticks = USEC_PER_SEC;
5755 }
b6016b76
MC
5756 if (bp->stats_ticks > 0xffff00) bp->stats_ticks = 0xffff00;
5757 bp->stats_ticks &= 0xffff00;
5758
5759 if (netif_running(bp->dev)) {
5760 bnx2_netif_stop(bp);
5761 bnx2_init_nic(bp);
5762 bnx2_netif_start(bp);
5763 }
5764
5765 return 0;
5766}
5767
5768static void
5769bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5770{
972ec0d4 5771 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5772
13daffa2 5773 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76
MC
5774 ering->rx_mini_max_pending = 0;
5775 ering->rx_jumbo_max_pending = 0;
5776
5777 ering->rx_pending = bp->rx_ring_size;
5778 ering->rx_mini_pending = 0;
5779 ering->rx_jumbo_pending = 0;
5780
5781 ering->tx_max_pending = MAX_TX_DESC_CNT;
5782 ering->tx_pending = bp->tx_ring_size;
5783}
5784
5785static int
5786bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
5787{
972ec0d4 5788 struct bnx2 *bp = netdev_priv(dev);
b6016b76 5789
13daffa2 5790 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
b6016b76
MC
5791 (ering->tx_pending > MAX_TX_DESC_CNT) ||
5792 (ering->tx_pending <= MAX_SKB_FRAGS)) {
5793
5794 return -EINVAL;
5795 }
13daffa2
MC
5796 if (netif_running(bp->dev)) {
5797 bnx2_netif_stop(bp);
5798 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
5799 bnx2_free_skbs(bp);
5800 bnx2_free_mem(bp);
5801 }
5802
5803 bnx2_set_rx_ring_size(bp, ering->rx_pending);
b6016b76
MC
5804 bp->tx_ring_size = ering->tx_pending;
5805
5806 if (netif_running(bp->dev)) {
13daffa2
MC
5807 int rc;
5808
5809 rc = bnx2_alloc_mem(bp);
5810 if (rc)
5811 return rc;
b6016b76
MC
5812 bnx2_init_nic(bp);
5813 bnx2_netif_start(bp);
5814 }
5815
5816 return 0;
5817}
5818
5819static void
5820bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5821{
972ec0d4 5822 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5823
5824 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
5825 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
5826 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
5827}
5828
5829static int
5830bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
5831{
972ec0d4 5832 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5833
5834 bp->req_flow_ctrl = 0;
5835 if (epause->rx_pause)
5836 bp->req_flow_ctrl |= FLOW_CTRL_RX;
5837 if (epause->tx_pause)
5838 bp->req_flow_ctrl |= FLOW_CTRL_TX;
5839
5840 if (epause->autoneg) {
5841 bp->autoneg |= AUTONEG_FLOW_CTRL;
5842 }
5843 else {
5844 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
5845 }
5846
c770a65c 5847 spin_lock_bh(&bp->phy_lock);
b6016b76 5848
0d8a6571 5849 bnx2_setup_phy(bp, bp->phy_port);
b6016b76 5850
c770a65c 5851 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5852
5853 return 0;
5854}
5855
5856static u32
5857bnx2_get_rx_csum(struct net_device *dev)
5858{
972ec0d4 5859 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5860
5861 return bp->rx_csum;
5862}
5863
5864static int
5865bnx2_set_rx_csum(struct net_device *dev, u32 data)
5866{
972ec0d4 5867 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5868
5869 bp->rx_csum = data;
5870 return 0;
5871}
5872
b11d6213
MC
5873static int
5874bnx2_set_tso(struct net_device *dev, u32 data)
5875{
4666f87a
MC
5876 struct bnx2 *bp = netdev_priv(dev);
5877
5878 if (data) {
b11d6213 5879 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
5880 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5881 dev->features |= NETIF_F_TSO6;
5882 } else
5883 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
5884 NETIF_F_TSO_ECN);
b11d6213
MC
5885 return 0;
5886}
5887
cea94db9 5888#define BNX2_NUM_STATS 46
b6016b76 5889
14ab9b86 5890static struct {
b6016b76
MC
5891 char string[ETH_GSTRING_LEN];
5892} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
5893 { "rx_bytes" },
5894 { "rx_error_bytes" },
5895 { "tx_bytes" },
5896 { "tx_error_bytes" },
5897 { "rx_ucast_packets" },
5898 { "rx_mcast_packets" },
5899 { "rx_bcast_packets" },
5900 { "tx_ucast_packets" },
5901 { "tx_mcast_packets" },
5902 { "tx_bcast_packets" },
5903 { "tx_mac_errors" },
5904 { "tx_carrier_errors" },
5905 { "rx_crc_errors" },
5906 { "rx_align_errors" },
5907 { "tx_single_collisions" },
5908 { "tx_multi_collisions" },
5909 { "tx_deferred" },
5910 { "tx_excess_collisions" },
5911 { "tx_late_collisions" },
5912 { "tx_total_collisions" },
5913 { "rx_fragments" },
5914 { "rx_jabbers" },
5915 { "rx_undersize_packets" },
5916 { "rx_oversize_packets" },
5917 { "rx_64_byte_packets" },
5918 { "rx_65_to_127_byte_packets" },
5919 { "rx_128_to_255_byte_packets" },
5920 { "rx_256_to_511_byte_packets" },
5921 { "rx_512_to_1023_byte_packets" },
5922 { "rx_1024_to_1522_byte_packets" },
5923 { "rx_1523_to_9022_byte_packets" },
5924 { "tx_64_byte_packets" },
5925 { "tx_65_to_127_byte_packets" },
5926 { "tx_128_to_255_byte_packets" },
5927 { "tx_256_to_511_byte_packets" },
5928 { "tx_512_to_1023_byte_packets" },
5929 { "tx_1024_to_1522_byte_packets" },
5930 { "tx_1523_to_9022_byte_packets" },
5931 { "rx_xon_frames" },
5932 { "rx_xoff_frames" },
5933 { "tx_xon_frames" },
5934 { "tx_xoff_frames" },
5935 { "rx_mac_ctrl_frames" },
5936 { "rx_filtered_packets" },
5937 { "rx_discards" },
cea94db9 5938 { "rx_fw_discards" },
b6016b76
MC
5939};
5940
5941#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
5942
f71e1309 5943static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
5944 STATS_OFFSET32(stat_IfHCInOctets_hi),
5945 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
5946 STATS_OFFSET32(stat_IfHCOutOctets_hi),
5947 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
5948 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
5949 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
5950 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
5951 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
5952 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
5953 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
5954 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
5955 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
5956 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
5957 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
5958 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
5959 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
5960 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
5961 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
5962 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
5963 STATS_OFFSET32(stat_EtherStatsCollisions),
5964 STATS_OFFSET32(stat_EtherStatsFragments),
5965 STATS_OFFSET32(stat_EtherStatsJabbers),
5966 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
5967 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
5968 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
5969 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
5970 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
5971 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
5972 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
5973 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
5974 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
5975 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
5976 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
5977 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
5978 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
5979 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
5980 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
5981 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
5982 STATS_OFFSET32(stat_XonPauseFramesReceived),
5983 STATS_OFFSET32(stat_XoffPauseFramesReceived),
5984 STATS_OFFSET32(stat_OutXonSent),
5985 STATS_OFFSET32(stat_OutXoffSent),
5986 STATS_OFFSET32(stat_MacControlFramesReceived),
5987 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
5988 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 5989 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
5990};
5991
5992/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
5993 * skipped because of errata.
6aa20a22 5994 */
14ab9b86 5995static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
5996 8,0,8,8,8,8,8,8,8,8,
5997 4,0,4,4,4,4,4,4,4,4,
5998 4,4,4,4,4,4,4,4,4,4,
5999 4,4,4,4,4,4,4,4,4,4,
cea94db9 6000 4,4,4,4,4,4,
b6016b76
MC
6001};
6002
5b0c76ad
MC
6003static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6004 8,0,8,8,8,8,8,8,8,8,
6005 4,4,4,4,4,4,4,4,4,4,
6006 4,4,4,4,4,4,4,4,4,4,
6007 4,4,4,4,4,4,4,4,4,4,
cea94db9 6008 4,4,4,4,4,4,
5b0c76ad
MC
6009};
6010
b6016b76
MC
6011#define BNX2_NUM_TESTS 6
6012
14ab9b86 6013static struct {
b6016b76
MC
6014 char string[ETH_GSTRING_LEN];
6015} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6016 { "register_test (offline)" },
6017 { "memory_test (offline)" },
6018 { "loopback_test (offline)" },
6019 { "nvram_test (online)" },
6020 { "interrupt_test (online)" },
6021 { "link_test (online)" },
6022};
6023
6024static int
6025bnx2_self_test_count(struct net_device *dev)
6026{
6027 return BNX2_NUM_TESTS;
6028}
6029
6030static void
6031bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6032{
972ec0d4 6033 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6034
6035 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6036 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
6037 int i;
6038
b6016b76
MC
6039 bnx2_netif_stop(bp);
6040 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6041 bnx2_free_skbs(bp);
6042
6043 if (bnx2_test_registers(bp) != 0) {
6044 buf[0] = 1;
6045 etest->flags |= ETH_TEST_FL_FAILED;
6046 }
6047 if (bnx2_test_memory(bp) != 0) {
6048 buf[1] = 1;
6049 etest->flags |= ETH_TEST_FL_FAILED;
6050 }
bc5a0690 6051 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 6052 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76
MC
6053
6054 if (!netif_running(bp->dev)) {
6055 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6056 }
6057 else {
6058 bnx2_init_nic(bp);
6059 bnx2_netif_start(bp);
6060 }
6061
6062 /* wait for link up */
80be4434
MC
6063 for (i = 0; i < 7; i++) {
6064 if (bp->link_up)
6065 break;
6066 msleep_interruptible(1000);
6067 }
b6016b76
MC
6068 }
6069
6070 if (bnx2_test_nvram(bp) != 0) {
6071 buf[3] = 1;
6072 etest->flags |= ETH_TEST_FL_FAILED;
6073 }
6074 if (bnx2_test_intr(bp) != 0) {
6075 buf[4] = 1;
6076 etest->flags |= ETH_TEST_FL_FAILED;
6077 }
6078
6079 if (bnx2_test_link(bp) != 0) {
6080 buf[5] = 1;
6081 etest->flags |= ETH_TEST_FL_FAILED;
6082
6083 }
6084}
6085
6086static void
6087bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6088{
6089 switch (stringset) {
6090 case ETH_SS_STATS:
6091 memcpy(buf, bnx2_stats_str_arr,
6092 sizeof(bnx2_stats_str_arr));
6093 break;
6094 case ETH_SS_TEST:
6095 memcpy(buf, bnx2_tests_str_arr,
6096 sizeof(bnx2_tests_str_arr));
6097 break;
6098 }
6099}
6100
6101static int
6102bnx2_get_stats_count(struct net_device *dev)
6103{
6104 return BNX2_NUM_STATS;
6105}
6106
6107static void
6108bnx2_get_ethtool_stats(struct net_device *dev,
6109 struct ethtool_stats *stats, u64 *buf)
6110{
972ec0d4 6111 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6112 int i;
6113 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 6114 u8 *stats_len_arr = NULL;
b6016b76
MC
6115
6116 if (hw_stats == NULL) {
6117 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
6118 return;
6119 }
6120
5b0c76ad
MC
6121 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
6122 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
6123 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
6124 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 6125 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
6126 else
6127 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
6128
6129 for (i = 0; i < BNX2_NUM_STATS; i++) {
6130 if (stats_len_arr[i] == 0) {
6131 /* skip this counter */
6132 buf[i] = 0;
6133 continue;
6134 }
6135 if (stats_len_arr[i] == 4) {
6136 /* 4-byte counter */
6137 buf[i] = (u64)
6138 *(hw_stats + bnx2_stats_offset_arr[i]);
6139 continue;
6140 }
6141 /* 8-byte counter */
6142 buf[i] = (((u64) *(hw_stats +
6143 bnx2_stats_offset_arr[i])) << 32) +
6144 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
6145 }
6146}
6147
6148static int
6149bnx2_phys_id(struct net_device *dev, u32 data)
6150{
972ec0d4 6151 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6152 int i;
6153 u32 save;
6154
6155 if (data == 0)
6156 data = 2;
6157
6158 save = REG_RD(bp, BNX2_MISC_CFG);
6159 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
6160
6161 for (i = 0; i < (data * 2); i++) {
6162 if ((i % 2) == 0) {
6163 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
6164 }
6165 else {
6166 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
6167 BNX2_EMAC_LED_1000MB_OVERRIDE |
6168 BNX2_EMAC_LED_100MB_OVERRIDE |
6169 BNX2_EMAC_LED_10MB_OVERRIDE |
6170 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
6171 BNX2_EMAC_LED_TRAFFIC);
6172 }
6173 msleep_interruptible(500);
6174 if (signal_pending(current))
6175 break;
6176 }
6177 REG_WR(bp, BNX2_EMAC_LED, 0);
6178 REG_WR(bp, BNX2_MISC_CFG, save);
6179 return 0;
6180}
6181
4666f87a
MC
6182static int
6183bnx2_set_tx_csum(struct net_device *dev, u32 data)
6184{
6185 struct bnx2 *bp = netdev_priv(dev);
6186
6187 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6188 return (ethtool_op_set_tx_hw_csum(dev, data));
6189 else
6190 return (ethtool_op_set_tx_csum(dev, data));
6191}
6192
7282d491 6193static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
6194 .get_settings = bnx2_get_settings,
6195 .set_settings = bnx2_set_settings,
6196 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
6197 .get_regs_len = bnx2_get_regs_len,
6198 .get_regs = bnx2_get_regs,
b6016b76
MC
6199 .get_wol = bnx2_get_wol,
6200 .set_wol = bnx2_set_wol,
6201 .nway_reset = bnx2_nway_reset,
6202 .get_link = ethtool_op_get_link,
6203 .get_eeprom_len = bnx2_get_eeprom_len,
6204 .get_eeprom = bnx2_get_eeprom,
6205 .set_eeprom = bnx2_set_eeprom,
6206 .get_coalesce = bnx2_get_coalesce,
6207 .set_coalesce = bnx2_set_coalesce,
6208 .get_ringparam = bnx2_get_ringparam,
6209 .set_ringparam = bnx2_set_ringparam,
6210 .get_pauseparam = bnx2_get_pauseparam,
6211 .set_pauseparam = bnx2_set_pauseparam,
6212 .get_rx_csum = bnx2_get_rx_csum,
6213 .set_rx_csum = bnx2_set_rx_csum,
6214 .get_tx_csum = ethtool_op_get_tx_csum,
4666f87a 6215 .set_tx_csum = bnx2_set_tx_csum,
b6016b76
MC
6216 .get_sg = ethtool_op_get_sg,
6217 .set_sg = ethtool_op_set_sg,
b6016b76 6218 .get_tso = ethtool_op_get_tso,
b11d6213 6219 .set_tso = bnx2_set_tso,
b6016b76
MC
6220 .self_test_count = bnx2_self_test_count,
6221 .self_test = bnx2_self_test,
6222 .get_strings = bnx2_get_strings,
6223 .phys_id = bnx2_phys_id,
6224 .get_stats_count = bnx2_get_stats_count,
6225 .get_ethtool_stats = bnx2_get_ethtool_stats,
24b8e05d 6226 .get_perm_addr = ethtool_op_get_perm_addr,
b6016b76
MC
6227};
6228
6229/* Called with rtnl_lock */
6230static int
6231bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6232{
14ab9b86 6233 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 6234 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6235 int err;
6236
6237 switch(cmd) {
6238 case SIOCGMIIPHY:
6239 data->phy_id = bp->phy_addr;
6240
6241 /* fallthru */
6242 case SIOCGMIIREG: {
6243 u32 mii_regval;
6244
7b6b8347
MC
6245 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6246 return -EOPNOTSUPP;
6247
dad3e452
MC
6248 if (!netif_running(dev))
6249 return -EAGAIN;
6250
c770a65c 6251 spin_lock_bh(&bp->phy_lock);
b6016b76 6252 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 6253 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6254
6255 data->val_out = mii_regval;
6256
6257 return err;
6258 }
6259
6260 case SIOCSMIIREG:
6261 if (!capable(CAP_NET_ADMIN))
6262 return -EPERM;
6263
7b6b8347
MC
6264 if (bp->phy_flags & REMOTE_PHY_CAP_FLAG)
6265 return -EOPNOTSUPP;
6266
dad3e452
MC
6267 if (!netif_running(dev))
6268 return -EAGAIN;
6269
c770a65c 6270 spin_lock_bh(&bp->phy_lock);
b6016b76 6271 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 6272 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6273
6274 return err;
6275
6276 default:
6277 /* do nothing */
6278 break;
6279 }
6280 return -EOPNOTSUPP;
6281}
6282
6283/* Called with rtnl_lock */
6284static int
6285bnx2_change_mac_addr(struct net_device *dev, void *p)
6286{
6287 struct sockaddr *addr = p;
972ec0d4 6288 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6289
73eef4cd
MC
6290 if (!is_valid_ether_addr(addr->sa_data))
6291 return -EINVAL;
6292
b6016b76
MC
6293 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
6294 if (netif_running(dev))
6295 bnx2_set_mac_addr(bp);
6296
6297 return 0;
6298}
6299
6300/* Called with rtnl_lock */
6301static int
6302bnx2_change_mtu(struct net_device *dev, int new_mtu)
6303{
972ec0d4 6304 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6305
6306 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
6307 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
6308 return -EINVAL;
6309
6310 dev->mtu = new_mtu;
6311 if (netif_running(dev)) {
6312 bnx2_netif_stop(bp);
6313
6314 bnx2_init_nic(bp);
6315
6316 bnx2_netif_start(bp);
6317 }
6318 return 0;
6319}
6320
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll handler: run the interrupt routine with the IRQ masked off. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif
6332
253c8b75
MC
6333static void __devinit
6334bnx2_get_5709_media(struct bnx2 *bp)
6335{
6336 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
6337 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
6338 u32 strap;
6339
6340 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
6341 return;
6342 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
6343 bp->phy_flags |= PHY_SERDES_FLAG;
6344 return;
6345 }
6346
6347 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
6348 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
6349 else
6350 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
6351
6352 if (PCI_FUNC(bp->pdev->devfn) == 0) {
6353 switch (strap) {
6354 case 0x4:
6355 case 0x5:
6356 case 0x6:
6357 bp->phy_flags |= PHY_SERDES_FLAG;
6358 return;
6359 }
6360 } else {
6361 switch (strap) {
6362 case 0x1:
6363 case 0x2:
6364 case 0x4:
6365 bp->phy_flags |= PHY_SERDES_FLAG;
6366 return;
6367 }
6368 }
6369}
6370
883e5151
MC
6371static void __devinit
6372bnx2_get_pci_speed(struct bnx2 *bp)
6373{
6374 u32 reg;
6375
6376 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
6377 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
6378 u32 clkreg;
6379
6380 bp->flags |= PCIX_FLAG;
6381
6382 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
6383
6384 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
6385 switch (clkreg) {
6386 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
6387 bp->bus_speed_mhz = 133;
6388 break;
6389
6390 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
6391 bp->bus_speed_mhz = 100;
6392 break;
6393
6394 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
6395 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
6396 bp->bus_speed_mhz = 66;
6397 break;
6398
6399 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
6400 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
6401 bp->bus_speed_mhz = 50;
6402 break;
6403
6404 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
6405 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
6406 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
6407 bp->bus_speed_mhz = 33;
6408 break;
6409 }
6410 }
6411 else {
6412 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
6413 bp->bus_speed_mhz = 66;
6414 else
6415 bp->bus_speed_mhz = 33;
6416 }
6417
6418 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
6419 bp->flags |= PCI_32BIT_FLAG;
6420
6421}
6422
b6016b76
MC
6423static int __devinit
6424bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
6425{
6426 struct bnx2 *bp;
6427 unsigned long mem_len;
6428 int rc;
6429 u32 reg;
40453c83 6430 u64 dma_mask, persist_dma_mask;
b6016b76
MC
6431
6432 SET_MODULE_OWNER(dev);
6433 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 6434 bp = netdev_priv(dev);
b6016b76
MC
6435
6436 bp->flags = 0;
6437 bp->phy_flags = 0;
6438
6439 /* enable device (incl. PCI PM wakeup), and bus-mastering */
6440 rc = pci_enable_device(pdev);
6441 if (rc) {
9b91cf9d 6442 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.");
b6016b76
MC
6443 goto err_out;
6444 }
6445
6446 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 6447 dev_err(&pdev->dev,
2e8a538d 6448 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
6449 rc = -ENODEV;
6450 goto err_out_disable;
6451 }
6452
6453 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6454 if (rc) {
9b91cf9d 6455 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
6456 goto err_out_disable;
6457 }
6458
6459 pci_set_master(pdev);
6460
6461 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6462 if (bp->pm_cap == 0) {
9b91cf9d 6463 dev_err(&pdev->dev,
2e8a538d 6464 "Cannot find power management capability, aborting.\n");
b6016b76
MC
6465 rc = -EIO;
6466 goto err_out_release;
6467 }
6468
b6016b76
MC
6469 bp->dev = dev;
6470 bp->pdev = pdev;
6471
6472 spin_lock_init(&bp->phy_lock);
1b8227c4 6473 spin_lock_init(&bp->indirect_lock);
c4028958 6474 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
6475
6476 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
59b47d8a 6477 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + 1);
b6016b76
MC
6478 dev->mem_end = dev->mem_start + mem_len;
6479 dev->irq = pdev->irq;
6480
6481 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
6482
6483 if (!bp->regview) {
9b91cf9d 6484 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
6485 rc = -ENOMEM;
6486 goto err_out_release;
6487 }
6488
6489 /* Configure byte swap and enable write to the reg_window registers.
6490 * Rely on CPU to do target byte swapping on big endian systems
6491 * The chip's target access swapping will not swap all accesses
6492 */
6493 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
6494 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
6495 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
6496
829ca9a3 6497 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
6498
6499 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
6500
883e5151
MC
6501 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
6502 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
6503 dev_err(&pdev->dev,
6504 "Cannot find PCIE capability, aborting.\n");
6505 rc = -EIO;
6506 goto err_out_unmap;
6507 }
6508 bp->flags |= PCIE_FLAG;
6509 } else {
59b47d8a
MC
6510 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
6511 if (bp->pcix_cap == 0) {
6512 dev_err(&pdev->dev,
6513 "Cannot find PCIX capability, aborting.\n");
6514 rc = -EIO;
6515 goto err_out_unmap;
6516 }
6517 }
6518
8e6a72c4
MC
6519 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
6520 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
6521 bp->flags |= MSI_CAP_FLAG;
6522 }
6523
40453c83
MC
6524 /* 5708 cannot support DMA addresses > 40-bit. */
6525 if (CHIP_NUM(bp) == CHIP_NUM_5708)
6526 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
6527 else
6528 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
6529
6530 /* Configure DMA attributes. */
6531 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
6532 dev->features |= NETIF_F_HIGHDMA;
6533 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
6534 if (rc) {
6535 dev_err(&pdev->dev,
6536 "pci_set_consistent_dma_mask failed, aborting.\n");
6537 goto err_out_unmap;
6538 }
6539 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
6540 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
6541 goto err_out_unmap;
6542 }
6543
883e5151
MC
6544 if (!(bp->flags & PCIE_FLAG))
6545 bnx2_get_pci_speed(bp);
b6016b76
MC
6546
6547 /* 5706A0 may falsely detect SERR and PERR. */
6548 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6549 reg = REG_RD(bp, PCI_COMMAND);
6550 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
6551 REG_WR(bp, PCI_COMMAND, reg);
6552 }
6553 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
6554 !(bp->flags & PCIX_FLAG)) {
6555
9b91cf9d 6556 dev_err(&pdev->dev,
2e8a538d 6557 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
6558 goto err_out_unmap;
6559 }
6560
6561 bnx2_init_nvram(bp);
6562
e3648b3d
MC
6563 reg = REG_RD_IND(bp, BNX2_SHM_HDR_SIGNATURE);
6564
6565 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
6566 BNX2_SHM_HDR_SIGNATURE_SIG) {
6567 u32 off = PCI_FUNC(pdev->devfn) << 2;
6568
6569 bp->shmem_base = REG_RD_IND(bp, BNX2_SHM_HDR_ADDR_0 + off);
6570 } else
e3648b3d
MC
6571 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
6572
b6016b76
MC
6573 /* Get the permanent MAC address. First we need to make sure the
6574 * firmware is actually running.
6575 */
e3648b3d 6576 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
6577
6578 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
6579 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 6580 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
6581 rc = -ENODEV;
6582 goto err_out_unmap;
6583 }
6584
e3648b3d 6585 bp->fw_ver = REG_RD_IND(bp, bp->shmem_base + BNX2_DEV_INFO_BC_REV);
b6016b76 6586
e3648b3d 6587 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
6588 bp->mac_addr[0] = (u8) (reg >> 8);
6589 bp->mac_addr[1] = (u8) reg;
6590
e3648b3d 6591 reg = REG_RD_IND(bp, bp->shmem_base + BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
6592 bp->mac_addr[2] = (u8) (reg >> 24);
6593 bp->mac_addr[3] = (u8) (reg >> 16);
6594 bp->mac_addr[4] = (u8) (reg >> 8);
6595 bp->mac_addr[5] = (u8) reg;
6596
6597 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 6598 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
6599
6600 bp->rx_csum = 1;
6601
6602 bp->rx_offset = sizeof(struct l2_fhdr) + 2;
6603
6604 bp->tx_quick_cons_trip_int = 20;
6605 bp->tx_quick_cons_trip = 20;
6606 bp->tx_ticks_int = 80;
6607 bp->tx_ticks = 80;
6aa20a22 6608
b6016b76
MC
6609 bp->rx_quick_cons_trip_int = 6;
6610 bp->rx_quick_cons_trip = 6;
6611 bp->rx_ticks_int = 18;
6612 bp->rx_ticks = 18;
6613
6614 bp->stats_ticks = 1000000 & 0xffff00;
6615
6616 bp->timer_interval = HZ;
cd339a0e 6617 bp->current_interval = HZ;
b6016b76 6618
5b0c76ad
MC
6619 bp->phy_addr = 1;
6620
b6016b76 6621 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
6622 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6623 bnx2_get_5709_media(bp);
6624 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
b6016b76 6625 bp->phy_flags |= PHY_SERDES_FLAG;
bac0dff6 6626
0d8a6571 6627 bp->phy_port = PORT_TP;
bac0dff6 6628 if (bp->phy_flags & PHY_SERDES_FLAG) {
0d8a6571 6629 bp->phy_port = PORT_FIBRE;
b6016b76 6630 bp->flags |= NO_WOL_FLAG;
bac0dff6 6631 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
5b0c76ad 6632 bp->phy_addr = 2;
e3648b3d 6633 reg = REG_RD_IND(bp, bp->shmem_base +
5b0c76ad
MC
6634 BNX2_SHARED_HW_CFG_CONFIG);
6635 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
6636 bp->phy_flags |= PHY_2_5G_CAPABLE_FLAG;
6637 }
0d8a6571
MC
6638 bnx2_init_remote_phy(bp);
6639
261dd5ca
MC
6640 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
6641 CHIP_NUM(bp) == CHIP_NUM_5708)
6642 bp->phy_flags |= PHY_CRC_FIX_FLAG;
b659f44e
MC
6643 else if (CHIP_ID(bp) == CHIP_ID_5709_A0)
6644 bp->phy_flags |= PHY_DIS_EARLY_DAC_FLAG;
b6016b76 6645
16088272
MC
6646 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
6647 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
6648 (CHIP_ID(bp) == CHIP_ID_5708_B1))
dda1e390
MC
6649 bp->flags |= NO_WOL_FLAG;
6650
b6016b76
MC
6651 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
6652 bp->tx_quick_cons_trip_int =
6653 bp->tx_quick_cons_trip;
6654 bp->tx_ticks_int = bp->tx_ticks;
6655 bp->rx_quick_cons_trip_int =
6656 bp->rx_quick_cons_trip;
6657 bp->rx_ticks_int = bp->rx_ticks;
6658 bp->comp_prod_trip_int = bp->comp_prod_trip;
6659 bp->com_ticks_int = bp->com_ticks;
6660 bp->cmd_ticks_int = bp->cmd_ticks;
6661 }
6662
f9317a40
MC
6663 /* Disable MSI on 5706 if AMD 8132 bridge is found.
6664 *
6665 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
6666 * with byte enables disabled on the unused 32-bit word. This is legal
6667 * but causes problems on the AMD 8132 which will eventually stop
6668 * responding after a while.
6669 *
6670 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 6671 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
6672 */
6673 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
6674 struct pci_dev *amd_8132 = NULL;
6675
6676 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
6677 PCI_DEVICE_ID_AMD_8132_BRIDGE,
6678 amd_8132))) {
6679 u8 rev;
6680
6681 pci_read_config_byte(amd_8132, PCI_REVISION_ID, &rev);
6682 if (rev >= 0x10 && rev <= 0x13) {
6683 disable_msi = 1;
6684 pci_dev_put(amd_8132);
6685 break;
6686 }
6687 }
6688 }
6689
deaf391b 6690 bnx2_set_default_link(bp);
b6016b76
MC
6691 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
6692
cd339a0e
MC
6693 init_timer(&bp->timer);
6694 bp->timer.expires = RUN_AT(bp->timer_interval);
6695 bp->timer.data = (unsigned long) bp;
6696 bp->timer.function = bnx2_timer;
6697
b6016b76
MC
6698 return 0;
6699
6700err_out_unmap:
6701 if (bp->regview) {
6702 iounmap(bp->regview);
73eef4cd 6703 bp->regview = NULL;
b6016b76
MC
6704 }
6705
6706err_out_release:
6707 pci_release_regions(pdev);
6708
6709err_out_disable:
6710 pci_disable_device(pdev);
6711 pci_set_drvdata(pdev, NULL);
6712
6713err_out:
6714 return rc;
6715}
6716
883e5151
MC
6717static char * __devinit
6718bnx2_bus_string(struct bnx2 *bp, char *str)
6719{
6720 char *s = str;
6721
6722 if (bp->flags & PCIE_FLAG) {
6723 s += sprintf(s, "PCI Express");
6724 } else {
6725 s += sprintf(s, "PCI");
6726 if (bp->flags & PCIX_FLAG)
6727 s += sprintf(s, "-X");
6728 if (bp->flags & PCI_32BIT_FLAG)
6729 s += sprintf(s, " 32-bit");
6730 else
6731 s += sprintf(s, " 64-bit");
6732 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
6733 }
6734 return str;
6735}
6736
b6016b76
MC
6737static int __devinit
6738bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
6739{
6740 static int version_printed = 0;
6741 struct net_device *dev = NULL;
6742 struct bnx2 *bp;
6743 int rc, i;
883e5151 6744 char str[40];
b6016b76
MC
6745
6746 if (version_printed++ == 0)
6747 printk(KERN_INFO "%s", version);
6748
6749 /* dev zeroed in init_etherdev */
6750 dev = alloc_etherdev(sizeof(*bp));
6751
6752 if (!dev)
6753 return -ENOMEM;
6754
6755 rc = bnx2_init_board(pdev, dev);
6756 if (rc < 0) {
6757 free_netdev(dev);
6758 return rc;
6759 }
6760
6761 dev->open = bnx2_open;
6762 dev->hard_start_xmit = bnx2_start_xmit;
6763 dev->stop = bnx2_close;
6764 dev->get_stats = bnx2_get_stats;
6765 dev->set_multicast_list = bnx2_set_rx_mode;
6766 dev->do_ioctl = bnx2_ioctl;
6767 dev->set_mac_address = bnx2_change_mac_addr;
6768 dev->change_mtu = bnx2_change_mtu;
6769 dev->tx_timeout = bnx2_tx_timeout;
6770 dev->watchdog_timeo = TX_TIMEOUT;
6771#ifdef BCM_VLAN
6772 dev->vlan_rx_register = bnx2_vlan_rx_register;
b6016b76
MC
6773#endif
6774 dev->poll = bnx2_poll;
6775 dev->ethtool_ops = &bnx2_ethtool_ops;
6776 dev->weight = 64;
6777
972ec0d4 6778 bp = netdev_priv(dev);
b6016b76
MC
6779
6780#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
6781 dev->poll_controller = poll_bnx2;
6782#endif
6783
1b2f922f
MC
6784 pci_set_drvdata(pdev, dev);
6785
6786 memcpy(dev->dev_addr, bp->mac_addr, 6);
6787 memcpy(dev->perm_addr, bp->mac_addr, 6);
6788 bp->name = board_info[ent->driver_data].name;
6789
d212f87b 6790 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
4666f87a 6791 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d212f87b
SH
6792 dev->features |= NETIF_F_IPV6_CSUM;
6793
1b2f922f
MC
6794#ifdef BCM_VLAN
6795 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
6796#endif
6797 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
6798 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6799 dev->features |= NETIF_F_TSO6;
1b2f922f 6800
b6016b76 6801 if ((rc = register_netdev(dev))) {
9b91cf9d 6802 dev_err(&pdev->dev, "Cannot register net device\n");
b6016b76
MC
6803 if (bp->regview)
6804 iounmap(bp->regview);
6805 pci_release_regions(pdev);
6806 pci_disable_device(pdev);
6807 pci_set_drvdata(pdev, NULL);
6808 free_netdev(dev);
6809 return rc;
6810 }
6811
883e5151 6812 printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
b6016b76
MC
6813 "IRQ %d, ",
6814 dev->name,
6815 bp->name,
6816 ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
6817 ((CHIP_ID(bp) & 0x0ff0) >> 4),
883e5151 6818 bnx2_bus_string(bp, str),
b6016b76
MC
6819 dev->base_addr,
6820 bp->pdev->irq);
6821
6822 printk("node addr ");
6823 for (i = 0; i < 6; i++)
6824 printk("%2.2x", dev->dev_addr[i]);
6825 printk("\n");
6826
b6016b76
MC
6827 return 0;
6828}
6829
/* PCI remove callback: tear down in the reverse order of probe.
 * flush_scheduled_work() must run before unregister/free so the
 * queued reset_task cannot execute against a freed device.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	flush_scheduled_work();

	unregister_netdev(dev);

	/* Unmap the register BAR before the bnx2 private area (inside
	 * the net_device) disappears with free_netdev().
	 */
	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
6848
6849static int
829ca9a3 6850bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
b6016b76
MC
6851{
6852 struct net_device *dev = pci_get_drvdata(pdev);
972ec0d4 6853 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6854 u32 reset_code;
6855
6856 if (!netif_running(dev))
6857 return 0;
6858
1d60290f 6859 flush_scheduled_work();
b6016b76
MC
6860 bnx2_netif_stop(bp);
6861 netif_device_detach(dev);
6862 del_timer_sync(&bp->timer);
dda1e390 6863 if (bp->flags & NO_WOL_FLAG)
6c4f095e 6864 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
dda1e390 6865 else if (bp->wol)
b6016b76
MC
6866 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
6867 else
6868 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
6869 bnx2_reset_chip(bp, reset_code);
6870 bnx2_free_skbs(bp);
30c517b2 6871 pci_save_state(pdev);
829ca9a3 6872 bnx2_set_power_state(bp, pci_choose_state(pdev, state));
b6016b76
MC
6873 return 0;
6874}
6875
/* PM resume callback: restore PCI config space, return to D0, and
 * re-initialize the NIC if it was running at suspend time.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	if (!netif_running(dev))
		return 0;

	pci_restore_state(pdev);
	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* NOTE(review): bnx2_init_nic()'s return value is ignored here;
	 * if it can fail (e.g. allocation failure), resume silently
	 * continues with a dead NIC — confirm against its definition.
	 */
	bnx2_init_nic(bp);
	bnx2_netif_start(bp);
	return 0;
}
6892
/* PCI driver glue: probe/remove plus power-management entry points. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
};
6901
/* Module init: register the driver with the PCI core; the core then
 * probes any matching devices.
 */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
6906
/* Module exit: unregister the driver; the PCI core calls
 * bnx2_remove_one() for every bound device.
 */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}

module_init(bnx2_init);
module_exit(bnx2_cleanup);