]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2.c
net: Allow RX queue selection to seed TX queue hashing.
[net-next-2.6.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
feebb331 3 * Copyright (c) 2004-2008 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
f2a4f052
MC
12
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15
16#include <linux/kernel.h>
17#include <linux/timer.h>
18#include <linux/errno.h>
19#include <linux/ioport.h>
20#include <linux/slab.h>
21#include <linux/vmalloc.h>
22#include <linux/interrupt.h>
23#include <linux/pci.h>
24#include <linux/init.h>
25#include <linux/netdevice.h>
26#include <linux/etherdevice.h>
27#include <linux/skbuff.h>
28#include <linux/dma-mapping.h>
1977f032 29#include <linux/bitops.h>
f2a4f052
MC
30#include <asm/io.h>
31#include <asm/irq.h>
32#include <linux/delay.h>
33#include <asm/byteorder.h>
c86a31f4 34#include <asm/page.h>
f2a4f052
MC
35#include <linux/time.h>
36#include <linux/ethtool.h>
37#include <linux/mii.h>
f2a4f052 38#include <linux/if_vlan.h>
08013fa3 39#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
f2a4f052
MC
40#define BCM_VLAN 1
41#endif
f2a4f052 42#include <net/ip.h>
de081fa5 43#include <net/tcp.h>
f2a4f052 44#include <net/checksum.h>
f2a4f052
MC
45#include <linux/workqueue.h>
46#include <linux/crc32.h>
47#include <linux/prefetch.h>
29b12174 48#include <linux/cache.h>
fba9fe91 49#include <linux/zlib.h>
706bf240 50#include <linux/log2.h>
f2a4f052 51
b6016b76
MC
52#include "bnx2.h"
53#include "bnx2_fw.h"
d43584c8 54#include "bnx2_fw2.h"
b6016b76 55
110d0ef9 56#define FW_BUF_SIZE 0x10000
b3448b0b 57
b6016b76
MC
58#define DRV_MODULE_NAME "bnx2"
59#define PFX DRV_MODULE_NAME ": "
1f2435e5
MC
60#define DRV_MODULE_VERSION "1.9.0"
61#define DRV_MODULE_RELDATE "Dec 16, 2008"
b6016b76
MC
62
63#define RUN_AT(x) (jiffies + (x))
64
65/* Time in jiffies before concluding the transmitter is hung. */
66#define TX_TIMEOUT (5*HZ)
67
fefa8645 68static char version[] __devinitdata =
b6016b76
MC
69 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
70
71MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 72MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
73MODULE_LICENSE("GPL");
74MODULE_VERSION(DRV_MODULE_VERSION);
75
76static int disable_msi = 0;
77
78module_param(disable_msi, int, 0);
79MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
80
81typedef enum {
82 BCM5706 = 0,
83 NC370T,
84 NC370I,
85 BCM5706S,
86 NC370F,
5b0c76ad
MC
87 BCM5708,
88 BCM5708S,
bac0dff6 89 BCM5709,
27a005b8 90 BCM5709S,
7bb0a04f 91 BCM5716,
1caacecb 92 BCM5716S,
b6016b76
MC
93} board_t;
94
95/* indexed by board_t, above */
fefa8645 96static struct {
b6016b76
MC
97 char *name;
98} board_info[] __devinitdata = {
99 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
100 { "HP NC370T Multifunction Gigabit Server Adapter" },
101 { "HP NC370i Multifunction Gigabit Server Adapter" },
102 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
103 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
104 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
105 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 106 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 107 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
7bb0a04f 108 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
1caacecb 109 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
b6016b76
MC
110 };
111
7bb0a04f 112static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
113 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
114 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
115 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
116 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
117 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
118 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
119 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
120 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
121 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
122 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
123 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
124 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
125 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
126 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
127 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
128 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
129 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
130 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
131 { PCI_VENDOR_ID_BROADCOM, 0x163b,
132 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 133 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 134 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
135 { 0, }
136};
137
138static struct flash_spec flash_table[] =
139{
e30372c9
MC
140#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
141#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 142 /* Slow EEPROM */
37137709 143 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 144 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
145 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
146 "EEPROM - slow"},
37137709
MC
147 /* Expansion entry 0001 */
148 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 149 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
150 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
151 "Entry 0001"},
b6016b76
MC
152 /* Saifun SA25F010 (non-buffered flash) */
153 /* strap, cfg1, & write1 need updates */
37137709 154 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 155 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
156 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
157 "Non-buffered flash (128kB)"},
158 /* Saifun SA25F020 (non-buffered flash) */
159 /* strap, cfg1, & write1 need updates */
37137709 160 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 161 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
162 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
163 "Non-buffered flash (256kB)"},
37137709
MC
164 /* Expansion entry 0100 */
165 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
167 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
168 "Entry 0100"},
169 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 170 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 171 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
172 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
173 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
174 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
175 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 176 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
177 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
178 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
179 /* Saifun SA25F005 (non-buffered flash) */
180 /* strap, cfg1, & write1 need updates */
181 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 182 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
183 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
184 "Non-buffered flash (64kB)"},
185 /* Fast EEPROM */
186 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 187 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
188 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
189 "EEPROM - fast"},
190 /* Expansion entry 1001 */
191 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 192 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
193 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
194 "Entry 1001"},
195 /* Expansion entry 1010 */
196 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 197 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
198 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
199 "Entry 1010"},
200 /* ATMEL AT45DB011B (buffered flash) */
201 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 202 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
203 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
204 "Buffered flash (128kB)"},
205 /* Expansion entry 1100 */
206 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 207 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
208 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
209 "Entry 1100"},
210 /* Expansion entry 1101 */
211 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 212 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
213 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
214 "Entry 1101"},
215 /* Ateml Expansion entry 1110 */
216 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 217 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
218 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
219 "Entry 1110 (Atmel)"},
220 /* ATMEL AT45DB021B (buffered flash) */
221 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 222 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
223 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
224 "Buffered flash (256kB)"},
b6016b76
MC
225};
226
e30372c9
MC
227static struct flash_spec flash_5709 = {
228 .flags = BNX2_NV_BUFFERED,
229 .page_bits = BCM5709_FLASH_PAGE_BITS,
230 .page_size = BCM5709_FLASH_PAGE_SIZE,
231 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
232 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
233 .name = "5709 Buffered flash (256kB)",
234};
235
b6016b76
MC
236MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
237
35e9010b 238static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 239{
2f8af120 240 u32 diff;
e89bbf10 241
2f8af120 242 smp_mb();
faac9c4b
MC
243
244 /* The ring uses 256 indices for 255 entries, one of them
245 * needs to be skipped.
246 */
35e9010b 247 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
248 if (unlikely(diff >= TX_DESC_CNT)) {
249 diff &= 0xffff;
250 if (diff == TX_DESC_CNT)
251 diff = MAX_TX_DESC_CNT;
252 }
e89bbf10
MC
253 return (bp->tx_ring_size - diff);
254}
255
b6016b76
MC
256static u32
257bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
258{
1b8227c4
MC
259 u32 val;
260
261 spin_lock_bh(&bp->indirect_lock);
b6016b76 262 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
263 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
264 spin_unlock_bh(&bp->indirect_lock);
265 return val;
b6016b76
MC
266}
267
268static void
269bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
270{
1b8227c4 271 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
272 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
273 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 274 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
275}
276
2726d6e1
MC
277static void
278bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
279{
280 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
281}
282
283static u32
284bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
285{
286 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
287}
288
b6016b76
MC
289static void
290bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
291{
292 offset += cid_addr;
1b8227c4 293 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
294 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
295 int i;
296
297 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
298 REG_WR(bp, BNX2_CTX_CTX_CTRL,
299 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
300 for (i = 0; i < 5; i++) {
59b47d8a
MC
301 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
302 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
303 break;
304 udelay(5);
305 }
306 } else {
307 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
308 REG_WR(bp, BNX2_CTX_DATA, val);
309 }
1b8227c4 310 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
311}
312
313static int
314bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
315{
316 u32 val1;
317 int i, ret;
318
583c28e5 319 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
320 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
321 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
322
323 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
324 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
325
326 udelay(40);
327 }
328
329 val1 = (bp->phy_addr << 21) | (reg << 16) |
330 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
331 BNX2_EMAC_MDIO_COMM_START_BUSY;
332 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
333
334 for (i = 0; i < 50; i++) {
335 udelay(10);
336
337 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
338 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
339 udelay(5);
340
341 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
342 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
343
344 break;
345 }
346 }
347
348 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
349 *val = 0x0;
350 ret = -EBUSY;
351 }
352 else {
353 *val = val1;
354 ret = 0;
355 }
356
583c28e5 357 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
358 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
359 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
360
361 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
362 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
363
364 udelay(40);
365 }
366
367 return ret;
368}
369
370static int
371bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
372{
373 u32 val1;
374 int i, ret;
375
583c28e5 376 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
377 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
378 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
379
380 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
381 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
382
383 udelay(40);
384 }
385
386 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
387 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
388 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
389 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 390
b6016b76
MC
391 for (i = 0; i < 50; i++) {
392 udelay(10);
393
394 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
395 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
396 udelay(5);
397 break;
398 }
399 }
400
401 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
402 ret = -EBUSY;
403 else
404 ret = 0;
405
583c28e5 406 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
407 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
408 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
409
410 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
411 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
412
413 udelay(40);
414 }
415
416 return ret;
417}
418
419static void
420bnx2_disable_int(struct bnx2 *bp)
421{
b4b36042
MC
422 int i;
423 struct bnx2_napi *bnapi;
424
425 for (i = 0; i < bp->irq_nvecs; i++) {
426 bnapi = &bp->bnx2_napi[i];
427 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
428 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
429 }
b6016b76
MC
430 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
431}
432
433static void
434bnx2_enable_int(struct bnx2 *bp)
435{
b4b36042
MC
436 int i;
437 struct bnx2_napi *bnapi;
35efa7c1 438
b4b36042
MC
439 for (i = 0; i < bp->irq_nvecs; i++) {
440 bnapi = &bp->bnx2_napi[i];
1269a8a6 441
b4b36042
MC
442 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
443 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
444 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
445 bnapi->last_status_idx);
b6016b76 446
b4b36042
MC
447 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
448 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
449 bnapi->last_status_idx);
450 }
bf5295bb 451 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
452}
453
454static void
455bnx2_disable_int_sync(struct bnx2 *bp)
456{
b4b36042
MC
457 int i;
458
b6016b76
MC
459 atomic_inc(&bp->intr_sem);
460 bnx2_disable_int(bp);
b4b36042
MC
461 for (i = 0; i < bp->irq_nvecs; i++)
462 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
463}
464
35efa7c1
MC
465static void
466bnx2_napi_disable(struct bnx2 *bp)
467{
b4b36042
MC
468 int i;
469
470 for (i = 0; i < bp->irq_nvecs; i++)
471 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
472}
473
474static void
475bnx2_napi_enable(struct bnx2 *bp)
476{
b4b36042
MC
477 int i;
478
479 for (i = 0; i < bp->irq_nvecs; i++)
480 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
481}
482
b6016b76
MC
483static void
484bnx2_netif_stop(struct bnx2 *bp)
485{
486 bnx2_disable_int_sync(bp);
487 if (netif_running(bp->dev)) {
35efa7c1 488 bnx2_napi_disable(bp);
b6016b76
MC
489 netif_tx_disable(bp->dev);
490 bp->dev->trans_start = jiffies; /* prevent tx timeout */
491 }
492}
493
494static void
495bnx2_netif_start(struct bnx2 *bp)
496{
497 if (atomic_dec_and_test(&bp->intr_sem)) {
498 if (netif_running(bp->dev)) {
706bf240 499 netif_tx_wake_all_queues(bp->dev);
35efa7c1 500 bnx2_napi_enable(bp);
b6016b76
MC
501 bnx2_enable_int(bp);
502 }
503 }
504}
505
35e9010b
MC
506static void
507bnx2_free_tx_mem(struct bnx2 *bp)
508{
509 int i;
510
511 for (i = 0; i < bp->num_tx_rings; i++) {
512 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
513 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
514
515 if (txr->tx_desc_ring) {
516 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
517 txr->tx_desc_ring,
518 txr->tx_desc_mapping);
519 txr->tx_desc_ring = NULL;
520 }
521 kfree(txr->tx_buf_ring);
522 txr->tx_buf_ring = NULL;
523 }
524}
525
bb4f98ab
MC
526static void
527bnx2_free_rx_mem(struct bnx2 *bp)
528{
529 int i;
530
531 for (i = 0; i < bp->num_rx_rings; i++) {
532 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
533 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
534 int j;
535
536 for (j = 0; j < bp->rx_max_ring; j++) {
537 if (rxr->rx_desc_ring[j])
538 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
539 rxr->rx_desc_ring[j],
540 rxr->rx_desc_mapping[j]);
541 rxr->rx_desc_ring[j] = NULL;
542 }
543 if (rxr->rx_buf_ring)
544 vfree(rxr->rx_buf_ring);
545 rxr->rx_buf_ring = NULL;
546
547 for (j = 0; j < bp->rx_max_pg_ring; j++) {
548 if (rxr->rx_pg_desc_ring[j])
549 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
3298a738
MC
550 rxr->rx_pg_desc_ring[j],
551 rxr->rx_pg_desc_mapping[j]);
552 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab
MC
553 }
554 if (rxr->rx_pg_ring)
555 vfree(rxr->rx_pg_ring);
556 rxr->rx_pg_ring = NULL;
557 }
558}
559
35e9010b
MC
560static int
561bnx2_alloc_tx_mem(struct bnx2 *bp)
562{
563 int i;
564
565 for (i = 0; i < bp->num_tx_rings; i++) {
566 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
567 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
568
569 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
570 if (txr->tx_buf_ring == NULL)
571 return -ENOMEM;
572
573 txr->tx_desc_ring =
574 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
575 &txr->tx_desc_mapping);
576 if (txr->tx_desc_ring == NULL)
577 return -ENOMEM;
578 }
579 return 0;
580}
581
bb4f98ab
MC
582static int
583bnx2_alloc_rx_mem(struct bnx2 *bp)
584{
585 int i;
586
587 for (i = 0; i < bp->num_rx_rings; i++) {
588 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
589 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
590 int j;
591
592 rxr->rx_buf_ring =
593 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
594 if (rxr->rx_buf_ring == NULL)
595 return -ENOMEM;
596
597 memset(rxr->rx_buf_ring, 0,
598 SW_RXBD_RING_SIZE * bp->rx_max_ring);
599
600 for (j = 0; j < bp->rx_max_ring; j++) {
601 rxr->rx_desc_ring[j] =
602 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
603 &rxr->rx_desc_mapping[j]);
604 if (rxr->rx_desc_ring[j] == NULL)
605 return -ENOMEM;
606
607 }
608
609 if (bp->rx_pg_ring_size) {
610 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
611 bp->rx_max_pg_ring);
612 if (rxr->rx_pg_ring == NULL)
613 return -ENOMEM;
614
615 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
616 bp->rx_max_pg_ring);
617 }
618
619 for (j = 0; j < bp->rx_max_pg_ring; j++) {
620 rxr->rx_pg_desc_ring[j] =
621 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
622 &rxr->rx_pg_desc_mapping[j]);
623 if (rxr->rx_pg_desc_ring[j] == NULL)
624 return -ENOMEM;
625
626 }
627 }
628 return 0;
629}
630
b6016b76
MC
631static void
632bnx2_free_mem(struct bnx2 *bp)
633{
13daffa2 634 int i;
43e80b89 635 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 636
35e9010b 637 bnx2_free_tx_mem(bp);
bb4f98ab 638 bnx2_free_rx_mem(bp);
35e9010b 639
59b47d8a
MC
640 for (i = 0; i < bp->ctx_pages; i++) {
641 if (bp->ctx_blk[i]) {
642 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
643 bp->ctx_blk[i],
644 bp->ctx_blk_mapping[i]);
645 bp->ctx_blk[i] = NULL;
646 }
647 }
43e80b89 648 if (bnapi->status_blk.msi) {
0f31f994 649 pci_free_consistent(bp->pdev, bp->status_stats_size,
43e80b89
MC
650 bnapi->status_blk.msi,
651 bp->status_blk_mapping);
652 bnapi->status_blk.msi = NULL;
0f31f994 653 bp->stats_blk = NULL;
b6016b76 654 }
b6016b76
MC
655}
656
657static int
658bnx2_alloc_mem(struct bnx2 *bp)
659{
35e9010b 660 int i, status_blk_size, err;
43e80b89
MC
661 struct bnx2_napi *bnapi;
662 void *status_blk;
b6016b76 663
0f31f994
MC
664 /* Combine status and statistics blocks into one allocation. */
665 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
f86e82fb 666 if (bp->flags & BNX2_FLAG_MSIX_CAP)
b4b36042
MC
667 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
668 BNX2_SBLK_MSIX_ALIGN_SIZE);
0f31f994
MC
669 bp->status_stats_size = status_blk_size +
670 sizeof(struct statistics_block);
671
43e80b89
MC
672 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
673 &bp->status_blk_mapping);
674 if (status_blk == NULL)
b6016b76
MC
675 goto alloc_mem_err;
676
43e80b89 677 memset(status_blk, 0, bp->status_stats_size);
b6016b76 678
43e80b89
MC
679 bnapi = &bp->bnx2_napi[0];
680 bnapi->status_blk.msi = status_blk;
681 bnapi->hw_tx_cons_ptr =
682 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
683 bnapi->hw_rx_cons_ptr =
684 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
f86e82fb 685 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
b4b36042 686 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
43e80b89
MC
687 struct status_block_msix *sblk;
688
689 bnapi = &bp->bnx2_napi[i];
b4b36042 690
43e80b89
MC
691 sblk = (void *) (status_blk +
692 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
693 bnapi->status_blk.msix = sblk;
694 bnapi->hw_tx_cons_ptr =
695 &sblk->status_tx_quick_consumer_index;
696 bnapi->hw_rx_cons_ptr =
697 &sblk->status_rx_quick_consumer_index;
b4b36042
MC
698 bnapi->int_num = i << 24;
699 }
700 }
35efa7c1 701
43e80b89 702 bp->stats_blk = status_blk + status_blk_size;
b6016b76 703
0f31f994 704 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 705
59b47d8a
MC
706 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
707 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
708 if (bp->ctx_pages == 0)
709 bp->ctx_pages = 1;
710 for (i = 0; i < bp->ctx_pages; i++) {
711 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
712 BCM_PAGE_SIZE,
713 &bp->ctx_blk_mapping[i]);
714 if (bp->ctx_blk[i] == NULL)
715 goto alloc_mem_err;
716 }
717 }
35e9010b 718
bb4f98ab
MC
719 err = bnx2_alloc_rx_mem(bp);
720 if (err)
721 goto alloc_mem_err;
722
35e9010b
MC
723 err = bnx2_alloc_tx_mem(bp);
724 if (err)
725 goto alloc_mem_err;
726
b6016b76
MC
727 return 0;
728
729alloc_mem_err:
730 bnx2_free_mem(bp);
731 return -ENOMEM;
732}
733
e3648b3d
MC
734static void
735bnx2_report_fw_link(struct bnx2 *bp)
736{
737 u32 fw_link_status = 0;
738
583c28e5 739 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
740 return;
741
e3648b3d
MC
742 if (bp->link_up) {
743 u32 bmsr;
744
745 switch (bp->line_speed) {
746 case SPEED_10:
747 if (bp->duplex == DUPLEX_HALF)
748 fw_link_status = BNX2_LINK_STATUS_10HALF;
749 else
750 fw_link_status = BNX2_LINK_STATUS_10FULL;
751 break;
752 case SPEED_100:
753 if (bp->duplex == DUPLEX_HALF)
754 fw_link_status = BNX2_LINK_STATUS_100HALF;
755 else
756 fw_link_status = BNX2_LINK_STATUS_100FULL;
757 break;
758 case SPEED_1000:
759 if (bp->duplex == DUPLEX_HALF)
760 fw_link_status = BNX2_LINK_STATUS_1000HALF;
761 else
762 fw_link_status = BNX2_LINK_STATUS_1000FULL;
763 break;
764 case SPEED_2500:
765 if (bp->duplex == DUPLEX_HALF)
766 fw_link_status = BNX2_LINK_STATUS_2500HALF;
767 else
768 fw_link_status = BNX2_LINK_STATUS_2500FULL;
769 break;
770 }
771
772 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
773
774 if (bp->autoneg) {
775 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
776
ca58c3af
MC
777 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
778 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
779
780 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583c28e5 781 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
e3648b3d
MC
782 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
783 else
784 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
785 }
786 }
787 else
788 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
789
2726d6e1 790 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
e3648b3d
MC
791}
792
9b1084b8
MC
793static char *
794bnx2_xceiver_str(struct bnx2 *bp)
795{
796 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 797 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
798 "Copper"));
799}
800
b6016b76
MC
801static void
802bnx2_report_link(struct bnx2 *bp)
803{
804 if (bp->link_up) {
805 netif_carrier_on(bp->dev);
9b1084b8
MC
806 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
807 bnx2_xceiver_str(bp));
b6016b76
MC
808
809 printk("%d Mbps ", bp->line_speed);
810
811 if (bp->duplex == DUPLEX_FULL)
812 printk("full duplex");
813 else
814 printk("half duplex");
815
816 if (bp->flow_ctrl) {
817 if (bp->flow_ctrl & FLOW_CTRL_RX) {
818 printk(", receive ");
819 if (bp->flow_ctrl & FLOW_CTRL_TX)
820 printk("& transmit ");
821 }
822 else {
823 printk(", transmit ");
824 }
825 printk("flow control ON");
826 }
827 printk("\n");
828 }
829 else {
830 netif_carrier_off(bp->dev);
9b1084b8
MC
831 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
832 bnx2_xceiver_str(bp));
b6016b76 833 }
e3648b3d
MC
834
835 bnx2_report_fw_link(bp);
b6016b76
MC
836}
837
838static void
839bnx2_resolve_flow_ctrl(struct bnx2 *bp)
840{
841 u32 local_adv, remote_adv;
842
843 bp->flow_ctrl = 0;
6aa20a22 844 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
b6016b76
MC
845 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
846
847 if (bp->duplex == DUPLEX_FULL) {
848 bp->flow_ctrl = bp->req_flow_ctrl;
849 }
850 return;
851 }
852
853 if (bp->duplex != DUPLEX_FULL) {
854 return;
855 }
856
583c28e5 857 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
5b0c76ad
MC
858 (CHIP_NUM(bp) == CHIP_NUM_5708)) {
859 u32 val;
860
861 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
862 if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
863 bp->flow_ctrl |= FLOW_CTRL_TX;
864 if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
865 bp->flow_ctrl |= FLOW_CTRL_RX;
866 return;
867 }
868
ca58c3af
MC
869 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
870 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76 871
583c28e5 872 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
873 u32 new_local_adv = 0;
874 u32 new_remote_adv = 0;
875
876 if (local_adv & ADVERTISE_1000XPAUSE)
877 new_local_adv |= ADVERTISE_PAUSE_CAP;
878 if (local_adv & ADVERTISE_1000XPSE_ASYM)
879 new_local_adv |= ADVERTISE_PAUSE_ASYM;
880 if (remote_adv & ADVERTISE_1000XPAUSE)
881 new_remote_adv |= ADVERTISE_PAUSE_CAP;
882 if (remote_adv & ADVERTISE_1000XPSE_ASYM)
883 new_remote_adv |= ADVERTISE_PAUSE_ASYM;
884
885 local_adv = new_local_adv;
886 remote_adv = new_remote_adv;
887 }
888
889 /* See Table 28B-3 of 802.3ab-1999 spec. */
890 if (local_adv & ADVERTISE_PAUSE_CAP) {
891 if(local_adv & ADVERTISE_PAUSE_ASYM) {
892 if (remote_adv & ADVERTISE_PAUSE_CAP) {
893 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
894 }
895 else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
896 bp->flow_ctrl = FLOW_CTRL_RX;
897 }
898 }
899 else {
900 if (remote_adv & ADVERTISE_PAUSE_CAP) {
901 bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
902 }
903 }
904 }
905 else if (local_adv & ADVERTISE_PAUSE_ASYM) {
906 if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
907 (remote_adv & ADVERTISE_PAUSE_ASYM)) {
908
909 bp->flow_ctrl = FLOW_CTRL_TX;
910 }
911 }
912}
913
27a005b8
MC
914static int
915bnx2_5709s_linkup(struct bnx2 *bp)
916{
917 u32 val, speed;
918
919 bp->link_up = 1;
920
921 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
922 bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
923 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
924
925 if ((bp->autoneg & AUTONEG_SPEED) == 0) {
926 bp->line_speed = bp->req_line_speed;
927 bp->duplex = bp->req_duplex;
928 return 0;
929 }
930 speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
931 switch (speed) {
932 case MII_BNX2_GP_TOP_AN_SPEED_10:
933 bp->line_speed = SPEED_10;
934 break;
935 case MII_BNX2_GP_TOP_AN_SPEED_100:
936 bp->line_speed = SPEED_100;
937 break;
938 case MII_BNX2_GP_TOP_AN_SPEED_1G:
939 case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
940 bp->line_speed = SPEED_1000;
941 break;
942 case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
943 bp->line_speed = SPEED_2500;
944 break;
945 }
946 if (val & MII_BNX2_GP_TOP_AN_FD)
947 bp->duplex = DUPLEX_FULL;
948 else
949 bp->duplex = DUPLEX_HALF;
950 return 0;
951}
952
b6016b76 953static int
5b0c76ad
MC
954bnx2_5708s_linkup(struct bnx2 *bp)
955{
956 u32 val;
957
958 bp->link_up = 1;
959 bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
960 switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
961 case BCM5708S_1000X_STAT1_SPEED_10:
962 bp->line_speed = SPEED_10;
963 break;
964 case BCM5708S_1000X_STAT1_SPEED_100:
965 bp->line_speed = SPEED_100;
966 break;
967 case BCM5708S_1000X_STAT1_SPEED_1G:
968 bp->line_speed = SPEED_1000;
969 break;
970 case BCM5708S_1000X_STAT1_SPEED_2G5:
971 bp->line_speed = SPEED_2500;
972 break;
973 }
974 if (val & BCM5708S_1000X_STAT1_FD)
975 bp->duplex = DUPLEX_FULL;
976 else
977 bp->duplex = DUPLEX_HALF;
978
979 return 0;
980}
981
982static int
983bnx2_5706s_linkup(struct bnx2 *bp)
b6016b76
MC
984{
985 u32 bmcr, local_adv, remote_adv, common;
986
987 bp->link_up = 1;
988 bp->line_speed = SPEED_1000;
989
ca58c3af 990 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
991 if (bmcr & BMCR_FULLDPLX) {
992 bp->duplex = DUPLEX_FULL;
993 }
994 else {
995 bp->duplex = DUPLEX_HALF;
996 }
997
998 if (!(bmcr & BMCR_ANENABLE)) {
999 return 0;
1000 }
1001
ca58c3af
MC
1002 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1003 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
1004
1005 common = local_adv & remote_adv;
1006 if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {
1007
1008 if (common & ADVERTISE_1000XFULL) {
1009 bp->duplex = DUPLEX_FULL;
1010 }
1011 else {
1012 bp->duplex = DUPLEX_HALF;
1013 }
1014 }
1015
1016 return 0;
1017}
1018
1019static int
1020bnx2_copper_linkup(struct bnx2 *bp)
1021{
1022 u32 bmcr;
1023
ca58c3af 1024 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1025 if (bmcr & BMCR_ANENABLE) {
1026 u32 local_adv, remote_adv, common;
1027
1028 bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
1029 bnx2_read_phy(bp, MII_STAT1000, &remote_adv);
1030
1031 common = local_adv & (remote_adv >> 2);
1032 if (common & ADVERTISE_1000FULL) {
1033 bp->line_speed = SPEED_1000;
1034 bp->duplex = DUPLEX_FULL;
1035 }
1036 else if (common & ADVERTISE_1000HALF) {
1037 bp->line_speed = SPEED_1000;
1038 bp->duplex = DUPLEX_HALF;
1039 }
1040 else {
ca58c3af
MC
1041 bnx2_read_phy(bp, bp->mii_adv, &local_adv);
1042 bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);
b6016b76
MC
1043
1044 common = local_adv & remote_adv;
1045 if (common & ADVERTISE_100FULL) {
1046 bp->line_speed = SPEED_100;
1047 bp->duplex = DUPLEX_FULL;
1048 }
1049 else if (common & ADVERTISE_100HALF) {
1050 bp->line_speed = SPEED_100;
1051 bp->duplex = DUPLEX_HALF;
1052 }
1053 else if (common & ADVERTISE_10FULL) {
1054 bp->line_speed = SPEED_10;
1055 bp->duplex = DUPLEX_FULL;
1056 }
1057 else if (common & ADVERTISE_10HALF) {
1058 bp->line_speed = SPEED_10;
1059 bp->duplex = DUPLEX_HALF;
1060 }
1061 else {
1062 bp->line_speed = 0;
1063 bp->link_up = 0;
1064 }
1065 }
1066 }
1067 else {
1068 if (bmcr & BMCR_SPEED100) {
1069 bp->line_speed = SPEED_100;
1070 }
1071 else {
1072 bp->line_speed = SPEED_10;
1073 }
1074 if (bmcr & BMCR_FULLDPLX) {
1075 bp->duplex = DUPLEX_FULL;
1076 }
1077 else {
1078 bp->duplex = DUPLEX_HALF;
1079 }
1080 }
1081
1082 return 0;
1083}
1084
83e3fc89 1085static void
bb4f98ab 1086bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
83e3fc89 1087{
bb4f98ab 1088 u32 val, rx_cid_addr = GET_CID_ADDR(cid);
83e3fc89
MC
1089
1090 val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
1091 val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
1092 val |= 0x02 << 8;
1093
1094 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1095 u32 lo_water, hi_water;
1096
1097 if (bp->flow_ctrl & FLOW_CTRL_TX)
1098 lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
1099 else
1100 lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
1101 if (lo_water >= bp->rx_ring_size)
1102 lo_water = 0;
1103
1104 hi_water = bp->rx_ring_size / 4;
1105
1106 if (hi_water <= lo_water)
1107 lo_water = 0;
1108
1109 hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
1110 lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;
1111
1112 if (hi_water > 0xf)
1113 hi_water = 0xf;
1114 else if (hi_water == 0)
1115 lo_water = 0;
1116 val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
1117 }
1118 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
1119}
1120
bb4f98ab
MC
1121static void
1122bnx2_init_all_rx_contexts(struct bnx2 *bp)
1123{
1124 int i;
1125 u32 cid;
1126
1127 for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1128 if (i == 1)
1129 cid = RX_RSS_CID;
1130 bnx2_init_rx_context(bp, cid);
1131 }
1132}
1133
344478db 1134static void
b6016b76
MC
1135bnx2_set_mac_link(struct bnx2 *bp)
1136{
1137 u32 val;
1138
1139 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
1140 if (bp->link_up && (bp->line_speed == SPEED_1000) &&
1141 (bp->duplex == DUPLEX_HALF)) {
1142 REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
1143 }
1144
1145 /* Configure the EMAC mode register. */
1146 val = REG_RD(bp, BNX2_EMAC_MODE);
1147
1148 val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
5b0c76ad 1149 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 1150 BNX2_EMAC_MODE_25G_MODE);
b6016b76
MC
1151
1152 if (bp->link_up) {
5b0c76ad
MC
1153 switch (bp->line_speed) {
1154 case SPEED_10:
59b47d8a
MC
1155 if (CHIP_NUM(bp) != CHIP_NUM_5706) {
1156 val |= BNX2_EMAC_MODE_PORT_MII_10M;
5b0c76ad
MC
1157 break;
1158 }
1159 /* fall through */
1160 case SPEED_100:
1161 val |= BNX2_EMAC_MODE_PORT_MII;
1162 break;
1163 case SPEED_2500:
59b47d8a 1164 val |= BNX2_EMAC_MODE_25G_MODE;
5b0c76ad
MC
1165 /* fall through */
1166 case SPEED_1000:
1167 val |= BNX2_EMAC_MODE_PORT_GMII;
1168 break;
1169 }
b6016b76
MC
1170 }
1171 else {
1172 val |= BNX2_EMAC_MODE_PORT_GMII;
1173 }
1174
1175 /* Set the MAC to operate in the appropriate duplex mode. */
1176 if (bp->duplex == DUPLEX_HALF)
1177 val |= BNX2_EMAC_MODE_HALF_DUPLEX;
1178 REG_WR(bp, BNX2_EMAC_MODE, val);
1179
1180 /* Enable/disable rx PAUSE. */
1181 bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;
1182
1183 if (bp->flow_ctrl & FLOW_CTRL_RX)
1184 bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
1185 REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);
1186
1187 /* Enable/disable tx PAUSE. */
1188 val = REG_RD(bp, BNX2_EMAC_TX_MODE);
1189 val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;
1190
1191 if (bp->flow_ctrl & FLOW_CTRL_TX)
1192 val |= BNX2_EMAC_TX_MODE_FLOW_EN;
1193 REG_WR(bp, BNX2_EMAC_TX_MODE, val);
1194
1195 /* Acknowledge the interrupt. */
1196 REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);
1197
83e3fc89 1198 if (CHIP_NUM(bp) == CHIP_NUM_5709)
bb4f98ab 1199 bnx2_init_all_rx_contexts(bp);
b6016b76
MC
1200}
1201
27a005b8
MC
1202static void
1203bnx2_enable_bmsr1(struct bnx2 *bp)
1204{
583c28e5 1205 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1206 (CHIP_NUM(bp) == CHIP_NUM_5709))
1207 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1208 MII_BNX2_BLK_ADDR_GP_STATUS);
1209}
1210
1211static void
1212bnx2_disable_bmsr1(struct bnx2 *bp)
1213{
583c28e5 1214 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
27a005b8
MC
1215 (CHIP_NUM(bp) == CHIP_NUM_5709))
1216 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1217 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1218}
1219
605a9e20
MC
1220static int
1221bnx2_test_and_enable_2g5(struct bnx2 *bp)
1222{
1223 u32 up1;
1224 int ret = 1;
1225
583c28e5 1226 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1227 return 0;
1228
1229 if (bp->autoneg & AUTONEG_SPEED)
1230 bp->advertising |= ADVERTISED_2500baseX_Full;
1231
27a005b8
MC
1232 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1233 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1234
605a9e20
MC
1235 bnx2_read_phy(bp, bp->mii_up1, &up1);
1236 if (!(up1 & BCM5708S_UP1_2G5)) {
1237 up1 |= BCM5708S_UP1_2G5;
1238 bnx2_write_phy(bp, bp->mii_up1, up1);
1239 ret = 0;
1240 }
1241
27a005b8
MC
1242 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1243 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1244 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1245
605a9e20
MC
1246 return ret;
1247}
1248
1249static int
1250bnx2_test_and_disable_2g5(struct bnx2 *bp)
1251{
1252 u32 up1;
1253 int ret = 0;
1254
583c28e5 1255 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1256 return 0;
1257
27a005b8
MC
1258 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1259 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
1260
605a9e20
MC
1261 bnx2_read_phy(bp, bp->mii_up1, &up1);
1262 if (up1 & BCM5708S_UP1_2G5) {
1263 up1 &= ~BCM5708S_UP1_2G5;
1264 bnx2_write_phy(bp, bp->mii_up1, up1);
1265 ret = 1;
1266 }
1267
27a005b8
MC
1268 if (CHIP_NUM(bp) == CHIP_NUM_5709)
1269 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1270 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1271
605a9e20
MC
1272 return ret;
1273}
1274
1275static void
1276bnx2_enable_forced_2g5(struct bnx2 *bp)
1277{
1278 u32 bmcr;
1279
583c28e5 1280 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1281 return;
1282
27a005b8
MC
1283 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1284 u32 val;
1285
1286 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1287 MII_BNX2_BLK_ADDR_SERDES_DIG);
1288 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1289 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1290 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1291 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1292
1293 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1294 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1295 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1296
1297 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1298 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1299 bmcr |= BCM5708S_BMCR_FORCE_2500;
1300 }
1301
1302 if (bp->autoneg & AUTONEG_SPEED) {
1303 bmcr &= ~BMCR_ANENABLE;
1304 if (bp->req_duplex == DUPLEX_FULL)
1305 bmcr |= BMCR_FULLDPLX;
1306 }
1307 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1308}
1309
1310static void
1311bnx2_disable_forced_2g5(struct bnx2 *bp)
1312{
1313 u32 bmcr;
1314
583c28e5 1315 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
605a9e20
MC
1316 return;
1317
27a005b8
MC
1318 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1319 u32 val;
1320
1321 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1322 MII_BNX2_BLK_ADDR_SERDES_DIG);
1323 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1324 val &= ~MII_BNX2_SD_MISC1_FORCE;
1325 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1326
1327 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1328 MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1329 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1330
1331 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1332 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1333 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1334 }
1335
1336 if (bp->autoneg & AUTONEG_SPEED)
1337 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1338 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1339}
1340
b2fadeae
MC
1341static void
1342bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1343{
1344 u32 val;
1345
1346 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1347 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1348 if (start)
1349 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1350 else
1351 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1352}
1353
b6016b76
MC
1354static int
1355bnx2_set_link(struct bnx2 *bp)
1356{
1357 u32 bmsr;
1358 u8 link_up;
1359
80be4434 1360 if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
b6016b76
MC
1361 bp->link_up = 1;
1362 return 0;
1363 }
1364
583c28e5 1365 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
1366 return 0;
1367
b6016b76
MC
1368 link_up = bp->link_up;
1369
27a005b8
MC
1370 bnx2_enable_bmsr1(bp);
1371 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1372 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
1373 bnx2_disable_bmsr1(bp);
b6016b76 1374
583c28e5 1375 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
b6016b76 1376 (CHIP_NUM(bp) == CHIP_NUM_5706)) {
a2724e25 1377 u32 val, an_dbg;
b6016b76 1378
583c28e5 1379 if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
b2fadeae 1380 bnx2_5706s_force_link_dn(bp, 0);
583c28e5 1381 bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
b2fadeae 1382 }
b6016b76 1383 val = REG_RD(bp, BNX2_EMAC_STATUS);
a2724e25
MC
1384
1385 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
1386 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1387 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
1388
1389 if ((val & BNX2_EMAC_STATUS_LINK) &&
1390 !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
b6016b76
MC
1391 bmsr |= BMSR_LSTATUS;
1392 else
1393 bmsr &= ~BMSR_LSTATUS;
1394 }
1395
1396 if (bmsr & BMSR_LSTATUS) {
1397 bp->link_up = 1;
1398
583c28e5 1399 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5b0c76ad
MC
1400 if (CHIP_NUM(bp) == CHIP_NUM_5706)
1401 bnx2_5706s_linkup(bp);
1402 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
1403 bnx2_5708s_linkup(bp);
27a005b8
MC
1404 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
1405 bnx2_5709s_linkup(bp);
b6016b76
MC
1406 }
1407 else {
1408 bnx2_copper_linkup(bp);
1409 }
1410 bnx2_resolve_flow_ctrl(bp);
1411 }
1412 else {
583c28e5 1413 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
605a9e20
MC
1414 (bp->autoneg & AUTONEG_SPEED))
1415 bnx2_disable_forced_2g5(bp);
b6016b76 1416
583c28e5 1417 if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
b2fadeae
MC
1418 u32 bmcr;
1419
1420 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1421 bmcr |= BMCR_ANENABLE;
1422 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1423
583c28e5 1424 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
b2fadeae 1425 }
b6016b76
MC
1426 bp->link_up = 0;
1427 }
1428
1429 if (bp->link_up != link_up) {
1430 bnx2_report_link(bp);
1431 }
1432
1433 bnx2_set_mac_link(bp);
1434
1435 return 0;
1436}
1437
1438static int
1439bnx2_reset_phy(struct bnx2 *bp)
1440{
1441 int i;
1442 u32 reg;
1443
ca58c3af 1444 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
b6016b76
MC
1445
1446#define PHY_RESET_MAX_WAIT 100
1447 for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1448 udelay(10);
1449
ca58c3af 1450 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
b6016b76
MC
1451 if (!(reg & BMCR_RESET)) {
1452 udelay(20);
1453 break;
1454 }
1455 }
1456 if (i == PHY_RESET_MAX_WAIT) {
1457 return -EBUSY;
1458 }
1459 return 0;
1460}
1461
1462static u32
1463bnx2_phy_get_pause_adv(struct bnx2 *bp)
1464{
1465 u32 adv = 0;
1466
1467 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1468 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1469
583c28e5 1470 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1471 adv = ADVERTISE_1000XPAUSE;
1472 }
1473 else {
1474 adv = ADVERTISE_PAUSE_CAP;
1475 }
1476 }
1477 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1478 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1479 adv = ADVERTISE_1000XPSE_ASYM;
1480 }
1481 else {
1482 adv = ADVERTISE_PAUSE_ASYM;
1483 }
1484 }
1485 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1486 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1487 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1488 }
1489 else {
1490 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1491 }
1492 }
1493 return adv;
1494}
1495
a2f13890 1496static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1497
b6016b76 1498static int
0d8a6571 1499bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1500__releases(&bp->phy_lock)
1501__acquires(&bp->phy_lock)
0d8a6571
MC
1502{
1503 u32 speed_arg = 0, pause_adv;
1504
1505 pause_adv = bnx2_phy_get_pause_adv(bp);
1506
1507 if (bp->autoneg & AUTONEG_SPEED) {
1508 speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
1509 if (bp->advertising & ADVERTISED_10baseT_Half)
1510 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1511 if (bp->advertising & ADVERTISED_10baseT_Full)
1512 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1513 if (bp->advertising & ADVERTISED_100baseT_Half)
1514 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1515 if (bp->advertising & ADVERTISED_100baseT_Full)
1516 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1517 if (bp->advertising & ADVERTISED_1000baseT_Full)
1518 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1519 if (bp->advertising & ADVERTISED_2500baseX_Full)
1520 speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1521 } else {
1522 if (bp->req_line_speed == SPEED_2500)
1523 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
1524 else if (bp->req_line_speed == SPEED_1000)
1525 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
1526 else if (bp->req_line_speed == SPEED_100) {
1527 if (bp->req_duplex == DUPLEX_FULL)
1528 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
1529 else
1530 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
1531 } else if (bp->req_line_speed == SPEED_10) {
1532 if (bp->req_duplex == DUPLEX_FULL)
1533 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
1534 else
1535 speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
1536 }
1537 }
1538
1539 if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
1540 speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
c26736ec 1541 if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
0d8a6571
MC
1542 speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;
1543
1544 if (port == PORT_TP)
1545 speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
1546 BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;
1547
2726d6e1 1548 bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);
0d8a6571
MC
1549
1550 spin_unlock_bh(&bp->phy_lock);
a2f13890 1551 bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
0d8a6571
MC
1552 spin_lock_bh(&bp->phy_lock);
1553
1554 return 0;
1555}
1556
1557static int
1558bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1559__releases(&bp->phy_lock)
1560__acquires(&bp->phy_lock)
b6016b76 1561{
605a9e20 1562 u32 adv, bmcr;
b6016b76
MC
1563 u32 new_adv = 0;
1564
583c28e5 1565 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
1566 return (bnx2_setup_remote_phy(bp, port));
1567
b6016b76
MC
1568 if (!(bp->autoneg & AUTONEG_SPEED)) {
1569 u32 new_bmcr;
5b0c76ad
MC
1570 int force_link_down = 0;
1571
605a9e20
MC
1572 if (bp->req_line_speed == SPEED_2500) {
1573 if (!bnx2_test_and_enable_2g5(bp))
1574 force_link_down = 1;
1575 } else if (bp->req_line_speed == SPEED_1000) {
1576 if (bnx2_test_and_disable_2g5(bp))
1577 force_link_down = 1;
1578 }
ca58c3af 1579 bnx2_read_phy(bp, bp->mii_adv, &adv);
80be4434
MC
1580 adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);
1581
ca58c3af 1582 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
605a9e20 1583 new_bmcr = bmcr & ~BMCR_ANENABLE;
80be4434 1584 new_bmcr |= BMCR_SPEED1000;
605a9e20 1585
27a005b8
MC
1586 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1587 if (bp->req_line_speed == SPEED_2500)
1588 bnx2_enable_forced_2g5(bp);
1589 else if (bp->req_line_speed == SPEED_1000) {
1590 bnx2_disable_forced_2g5(bp);
1591 new_bmcr &= ~0x2000;
1592 }
1593
1594 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
605a9e20
MC
1595 if (bp->req_line_speed == SPEED_2500)
1596 new_bmcr |= BCM5708S_BMCR_FORCE_2500;
1597 else
1598 new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
5b0c76ad
MC
1599 }
1600
b6016b76 1601 if (bp->req_duplex == DUPLEX_FULL) {
5b0c76ad 1602 adv |= ADVERTISE_1000XFULL;
b6016b76
MC
1603 new_bmcr |= BMCR_FULLDPLX;
1604 }
1605 else {
5b0c76ad 1606 adv |= ADVERTISE_1000XHALF;
b6016b76
MC
1607 new_bmcr &= ~BMCR_FULLDPLX;
1608 }
5b0c76ad 1609 if ((new_bmcr != bmcr) || (force_link_down)) {
b6016b76
MC
1610 /* Force a link down visible on the other side */
1611 if (bp->link_up) {
ca58c3af 1612 bnx2_write_phy(bp, bp->mii_adv, adv &
5b0c76ad
MC
1613 ~(ADVERTISE_1000XFULL |
1614 ADVERTISE_1000XHALF));
ca58c3af 1615 bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
b6016b76
MC
1616 BMCR_ANRESTART | BMCR_ANENABLE);
1617
1618 bp->link_up = 0;
1619 netif_carrier_off(bp->dev);
ca58c3af 1620 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
80be4434 1621 bnx2_report_link(bp);
b6016b76 1622 }
ca58c3af
MC
1623 bnx2_write_phy(bp, bp->mii_adv, adv);
1624 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
605a9e20
MC
1625 } else {
1626 bnx2_resolve_flow_ctrl(bp);
1627 bnx2_set_mac_link(bp);
b6016b76
MC
1628 }
1629 return 0;
1630 }
1631
605a9e20 1632 bnx2_test_and_enable_2g5(bp);
5b0c76ad 1633
b6016b76
MC
1634 if (bp->advertising & ADVERTISED_1000baseT_Full)
1635 new_adv |= ADVERTISE_1000XFULL;
1636
1637 new_adv |= bnx2_phy_get_pause_adv(bp);
1638
ca58c3af
MC
1639 bnx2_read_phy(bp, bp->mii_adv, &adv);
1640 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1641
1642 bp->serdes_an_pending = 0;
1643 if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
1644 /* Force a link down visible on the other side */
1645 if (bp->link_up) {
ca58c3af 1646 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
80be4434
MC
1647 spin_unlock_bh(&bp->phy_lock);
1648 msleep(20);
1649 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
1650 }
1651
ca58c3af
MC
1652 bnx2_write_phy(bp, bp->mii_adv, new_adv);
1653 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
b6016b76 1654 BMCR_ANENABLE);
f8dd064e
MC
1655 /* Speed up link-up time when the link partner
1656 * does not autonegotiate which is very common
1657 * in blade servers. Some blade servers use
1658 * IPMI for kerboard input and it's important
1659 * to minimize link disruptions. Autoneg. involves
1660 * exchanging base pages plus 3 next pages and
1661 * normally completes in about 120 msec.
1662 */
40105c0b 1663 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
f8dd064e
MC
1664 bp->serdes_an_pending = 1;
1665 mod_timer(&bp->timer, jiffies + bp->current_interval);
605a9e20
MC
1666 } else {
1667 bnx2_resolve_flow_ctrl(bp);
1668 bnx2_set_mac_link(bp);
b6016b76
MC
1669 }
1670
1671 return 0;
1672}
1673
1674#define ETHTOOL_ALL_FIBRE_SPEED \
583c28e5 1675 (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ? \
deaf391b
MC
1676 (ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
1677 (ADVERTISED_1000baseT_Full)
b6016b76
MC
1678
1679#define ETHTOOL_ALL_COPPER_SPEED \
1680 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | \
1681 ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
1682 ADVERTISED_1000baseT_Full)
1683
1684#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
1685 ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)
6aa20a22 1686
b6016b76
MC
1687#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1688
0d8a6571
MC
1689static void
1690bnx2_set_default_remote_link(struct bnx2 *bp)
1691{
1692 u32 link;
1693
1694 if (bp->phy_port == PORT_TP)
2726d6e1 1695 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
0d8a6571 1696 else
2726d6e1 1697 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
0d8a6571
MC
1698
1699 if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1700 bp->req_line_speed = 0;
1701 bp->autoneg |= AUTONEG_SPEED;
1702 bp->advertising = ADVERTISED_Autoneg;
1703 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1704 bp->advertising |= ADVERTISED_10baseT_Half;
1705 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1706 bp->advertising |= ADVERTISED_10baseT_Full;
1707 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1708 bp->advertising |= ADVERTISED_100baseT_Half;
1709 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1710 bp->advertising |= ADVERTISED_100baseT_Full;
1711 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1712 bp->advertising |= ADVERTISED_1000baseT_Full;
1713 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1714 bp->advertising |= ADVERTISED_2500baseX_Full;
1715 } else {
1716 bp->autoneg = 0;
1717 bp->advertising = 0;
1718 bp->req_duplex = DUPLEX_FULL;
1719 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1720 bp->req_line_speed = SPEED_10;
1721 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1722 bp->req_duplex = DUPLEX_HALF;
1723 }
1724 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1725 bp->req_line_speed = SPEED_100;
1726 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1727 bp->req_duplex = DUPLEX_HALF;
1728 }
1729 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1730 bp->req_line_speed = SPEED_1000;
1731 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1732 bp->req_line_speed = SPEED_2500;
1733 }
1734}
1735
deaf391b
MC
1736static void
1737bnx2_set_default_link(struct bnx2 *bp)
1738{
ab59859d
HH
1739 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1740 bnx2_set_default_remote_link(bp);
1741 return;
1742 }
0d8a6571 1743
deaf391b
MC
1744 bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1745 bp->req_line_speed = 0;
583c28e5 1746 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
deaf391b
MC
1747 u32 reg;
1748
1749 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1750
2726d6e1 1751 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
deaf391b
MC
1752 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1753 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1754 bp->autoneg = 0;
1755 bp->req_line_speed = bp->line_speed = SPEED_1000;
1756 bp->req_duplex = DUPLEX_FULL;
1757 }
1758 } else
1759 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1760}
1761
df149d70
MC
1762static void
1763bnx2_send_heart_beat(struct bnx2 *bp)
1764{
1765 u32 msg;
1766 u32 addr;
1767
1768 spin_lock(&bp->indirect_lock);
1769 msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1770 addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1771 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1772 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1773 spin_unlock(&bp->indirect_lock);
1774}
1775
0d8a6571
MC
1776static void
1777bnx2_remote_phy_event(struct bnx2 *bp)
1778{
1779 u32 msg;
1780 u8 link_up = bp->link_up;
1781 u8 old_port;
1782
2726d6e1 1783 msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
0d8a6571 1784
df149d70
MC
1785 if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
1786 bnx2_send_heart_beat(bp);
1787
1788 msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;
1789
0d8a6571
MC
1790 if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
1791 bp->link_up = 0;
1792 else {
1793 u32 speed;
1794
1795 bp->link_up = 1;
1796 speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
1797 bp->duplex = DUPLEX_FULL;
1798 switch (speed) {
1799 case BNX2_LINK_STATUS_10HALF:
1800 bp->duplex = DUPLEX_HALF;
1801 case BNX2_LINK_STATUS_10FULL:
1802 bp->line_speed = SPEED_10;
1803 break;
1804 case BNX2_LINK_STATUS_100HALF:
1805 bp->duplex = DUPLEX_HALF;
1806 case BNX2_LINK_STATUS_100BASE_T4:
1807 case BNX2_LINK_STATUS_100FULL:
1808 bp->line_speed = SPEED_100;
1809 break;
1810 case BNX2_LINK_STATUS_1000HALF:
1811 bp->duplex = DUPLEX_HALF;
1812 case BNX2_LINK_STATUS_1000FULL:
1813 bp->line_speed = SPEED_1000;
1814 break;
1815 case BNX2_LINK_STATUS_2500HALF:
1816 bp->duplex = DUPLEX_HALF;
1817 case BNX2_LINK_STATUS_2500FULL:
1818 bp->line_speed = SPEED_2500;
1819 break;
1820 default:
1821 bp->line_speed = 0;
1822 break;
1823 }
1824
0d8a6571
MC
1825 bp->flow_ctrl = 0;
1826 if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
1827 (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
1828 if (bp->duplex == DUPLEX_FULL)
1829 bp->flow_ctrl = bp->req_flow_ctrl;
1830 } else {
1831 if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
1832 bp->flow_ctrl |= FLOW_CTRL_TX;
1833 if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
1834 bp->flow_ctrl |= FLOW_CTRL_RX;
1835 }
1836
1837 old_port = bp->phy_port;
1838 if (msg & BNX2_LINK_STATUS_SERDES_LINK)
1839 bp->phy_port = PORT_FIBRE;
1840 else
1841 bp->phy_port = PORT_TP;
1842
1843 if (old_port != bp->phy_port)
1844 bnx2_set_default_link(bp);
1845
0d8a6571
MC
1846 }
1847 if (bp->link_up != link_up)
1848 bnx2_report_link(bp);
1849
1850 bnx2_set_mac_link(bp);
1851}
1852
1853static int
1854bnx2_set_remote_link(struct bnx2 *bp)
1855{
1856 u32 evt_code;
1857
2726d6e1 1858 evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
0d8a6571
MC
1859 switch (evt_code) {
1860 case BNX2_FW_EVT_CODE_LINK_EVENT:
1861 bnx2_remote_phy_event(bp);
1862 break;
1863 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
1864 default:
df149d70 1865 bnx2_send_heart_beat(bp);
0d8a6571
MC
1866 break;
1867 }
1868 return 0;
1869}
1870
b6016b76
MC
1871static int
1872bnx2_setup_copper_phy(struct bnx2 *bp)
52d07b1f
HH
1873__releases(&bp->phy_lock)
1874__acquires(&bp->phy_lock)
b6016b76
MC
1875{
1876 u32 bmcr;
1877 u32 new_bmcr;
1878
ca58c3af 1879 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76
MC
1880
1881 if (bp->autoneg & AUTONEG_SPEED) {
1882 u32 adv_reg, adv1000_reg;
1883 u32 new_adv_reg = 0;
1884 u32 new_adv1000_reg = 0;
1885
ca58c3af 1886 bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
b6016b76
MC
1887 adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
1888 ADVERTISE_PAUSE_ASYM);
1889
1890 bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
1891 adv1000_reg &= PHY_ALL_1000_SPEED;
1892
1893 if (bp->advertising & ADVERTISED_10baseT_Half)
1894 new_adv_reg |= ADVERTISE_10HALF;
1895 if (bp->advertising & ADVERTISED_10baseT_Full)
1896 new_adv_reg |= ADVERTISE_10FULL;
1897 if (bp->advertising & ADVERTISED_100baseT_Half)
1898 new_adv_reg |= ADVERTISE_100HALF;
1899 if (bp->advertising & ADVERTISED_100baseT_Full)
1900 new_adv_reg |= ADVERTISE_100FULL;
1901 if (bp->advertising & ADVERTISED_1000baseT_Full)
1902 new_adv1000_reg |= ADVERTISE_1000FULL;
6aa20a22 1903
b6016b76
MC
1904 new_adv_reg |= ADVERTISE_CSMA;
1905
1906 new_adv_reg |= bnx2_phy_get_pause_adv(bp);
1907
1908 if ((adv1000_reg != new_adv1000_reg) ||
1909 (adv_reg != new_adv_reg) ||
1910 ((bmcr & BMCR_ANENABLE) == 0)) {
1911
ca58c3af 1912 bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
b6016b76 1913 bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
ca58c3af 1914 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
b6016b76
MC
1915 BMCR_ANENABLE);
1916 }
1917 else if (bp->link_up) {
1918 /* Flow ctrl may have changed from auto to forced */
1919 /* or vice-versa. */
1920
1921 bnx2_resolve_flow_ctrl(bp);
1922 bnx2_set_mac_link(bp);
1923 }
1924 return 0;
1925 }
1926
1927 new_bmcr = 0;
1928 if (bp->req_line_speed == SPEED_100) {
1929 new_bmcr |= BMCR_SPEED100;
1930 }
1931 if (bp->req_duplex == DUPLEX_FULL) {
1932 new_bmcr |= BMCR_FULLDPLX;
1933 }
1934 if (new_bmcr != bmcr) {
1935 u32 bmsr;
b6016b76 1936
ca58c3af
MC
1937 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1938 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
6aa20a22 1939
b6016b76
MC
1940 if (bmsr & BMSR_LSTATUS) {
1941 /* Force link down */
ca58c3af 1942 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
a16dda0e
MC
1943 spin_unlock_bh(&bp->phy_lock);
1944 msleep(50);
1945 spin_lock_bh(&bp->phy_lock);
1946
ca58c3af
MC
1947 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
1948 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
b6016b76
MC
1949 }
1950
ca58c3af 1951 bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
b6016b76
MC
1952
1953 /* Normally, the new speed is setup after the link has
1954 * gone down and up again. In some cases, link will not go
1955 * down so we need to set up the new speed here.
1956 */
1957 if (bmsr & BMSR_LSTATUS) {
1958 bp->line_speed = bp->req_line_speed;
1959 bp->duplex = bp->req_duplex;
1960 bnx2_resolve_flow_ctrl(bp);
1961 bnx2_set_mac_link(bp);
1962 }
27a005b8
MC
1963 } else {
1964 bnx2_resolve_flow_ctrl(bp);
1965 bnx2_set_mac_link(bp);
b6016b76
MC
1966 }
1967 return 0;
1968}
1969
1970static int
0d8a6571 1971bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
1972__releases(&bp->phy_lock)
1973__acquires(&bp->phy_lock)
b6016b76
MC
1974{
1975 if (bp->loopback == MAC_LOOPBACK)
1976 return 0;
1977
583c28e5 1978 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 1979 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
1980 }
1981 else {
1982 return (bnx2_setup_copper_phy(bp));
1983 }
1984}
1985
27a005b8 1986static int
9a120bc5 1987bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
27a005b8
MC
1988{
1989 u32 val;
1990
1991 bp->mii_bmcr = MII_BMCR + 0x10;
1992 bp->mii_bmsr = MII_BMSR + 0x10;
1993 bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
1994 bp->mii_adv = MII_ADVERTISE + 0x10;
1995 bp->mii_lpa = MII_LPA + 0x10;
1996 bp->mii_up1 = MII_BNX2_OVER1G_UP1;
1997
1998 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
1999 bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);
2000
2001 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
9a120bc5
MC
2002 if (reset_phy)
2003 bnx2_reset_phy(bp);
27a005b8
MC
2004
2005 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);
2006
2007 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
2008 val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
2009 val |= MII_BNX2_SD_1000XCTL1_FIBER;
2010 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);
2011
2012 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
2013 bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
583c28e5 2014 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
27a005b8
MC
2015 val |= BCM5708S_UP1_2G5;
2016 else
2017 val &= ~BCM5708S_UP1_2G5;
2018 bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);
2019
2020 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
2021 bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
2022 val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
2023 bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);
2024
2025 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);
2026
2027 val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
2028 MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
2029 bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);
2030
2031 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
2032
2033 return 0;
2034}
2035
b6016b76 2036static int
9a120bc5 2037bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
5b0c76ad
MC
2038{
2039 u32 val;
2040
9a120bc5
MC
2041 if (reset_phy)
2042 bnx2_reset_phy(bp);
27a005b8
MC
2043
2044 bp->mii_up1 = BCM5708S_UP1;
2045
5b0c76ad
MC
2046 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
2047 bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
2048 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2049
2050 bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
2051 val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
2052 bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);
2053
2054 bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
2055 val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
2056 bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);
2057
583c28e5 2058 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
5b0c76ad
MC
2059 bnx2_read_phy(bp, BCM5708S_UP1, &val);
2060 val |= BCM5708S_UP1_2G5;
2061 bnx2_write_phy(bp, BCM5708S_UP1, val);
2062 }
2063
2064 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
dda1e390
MC
2065 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
2066 (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
5b0c76ad
MC
2067 /* increase tx signal amplitude */
2068 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2069 BCM5708S_BLK_ADDR_TX_MISC);
2070 bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
2071 val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
2072 bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
2073 bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
2074 }
2075
2726d6e1 2076 val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
5b0c76ad
MC
2077 BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;
2078
2079 if (val) {
2080 u32 is_backplane;
2081
2726d6e1 2082 is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
5b0c76ad
MC
2083 if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
2084 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2085 BCM5708S_BLK_ADDR_TX_MISC);
2086 bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
2087 bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
2088 BCM5708S_BLK_ADDR_DIG);
2089 }
2090 }
2091 return 0;
2092}
2093
2094static int
9a120bc5 2095bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2096{
9a120bc5
MC
2097 if (reset_phy)
2098 bnx2_reset_phy(bp);
27a005b8 2099
583c28e5 2100 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
b6016b76 2101
59b47d8a
MC
2102 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2103 REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);
b6016b76
MC
2104
2105 if (bp->dev->mtu > 1500) {
2106 u32 val;
2107
2108 /* Set extended packet length bit */
2109 bnx2_write_phy(bp, 0x18, 0x7);
2110 bnx2_read_phy(bp, 0x18, &val);
2111 bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);
2112
2113 bnx2_write_phy(bp, 0x1c, 0x6c00);
2114 bnx2_read_phy(bp, 0x1c, &val);
2115 bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
2116 }
2117 else {
2118 u32 val;
2119
2120 bnx2_write_phy(bp, 0x18, 0x7);
2121 bnx2_read_phy(bp, 0x18, &val);
2122 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2123
2124 bnx2_write_phy(bp, 0x1c, 0x6c00);
2125 bnx2_read_phy(bp, 0x1c, &val);
2126 bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
2127 }
2128
2129 return 0;
2130}
2131
2132static int
9a120bc5 2133bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
b6016b76 2134{
5b0c76ad
MC
2135 u32 val;
2136
9a120bc5
MC
2137 if (reset_phy)
2138 bnx2_reset_phy(bp);
27a005b8 2139
583c28e5 2140 if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
b6016b76
MC
2141 bnx2_write_phy(bp, 0x18, 0x0c00);
2142 bnx2_write_phy(bp, 0x17, 0x000a);
2143 bnx2_write_phy(bp, 0x15, 0x310b);
2144 bnx2_write_phy(bp, 0x17, 0x201f);
2145 bnx2_write_phy(bp, 0x15, 0x9506);
2146 bnx2_write_phy(bp, 0x17, 0x401f);
2147 bnx2_write_phy(bp, 0x15, 0x14e2);
2148 bnx2_write_phy(bp, 0x18, 0x0400);
2149 }
2150
583c28e5 2151 if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
b659f44e
MC
2152 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
2153 MII_BNX2_DSP_EXPAND_REG | 0x8);
2154 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
2155 val &= ~(1 << 8);
2156 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
2157 }
2158
b6016b76 2159 if (bp->dev->mtu > 1500) {
b6016b76
MC
2160 /* Set extended packet length bit */
2161 bnx2_write_phy(bp, 0x18, 0x7);
2162 bnx2_read_phy(bp, 0x18, &val);
2163 bnx2_write_phy(bp, 0x18, val | 0x4000);
2164
2165 bnx2_read_phy(bp, 0x10, &val);
2166 bnx2_write_phy(bp, 0x10, val | 0x1);
2167 }
2168 else {
b6016b76
MC
2169 bnx2_write_phy(bp, 0x18, 0x7);
2170 bnx2_read_phy(bp, 0x18, &val);
2171 bnx2_write_phy(bp, 0x18, val & ~0x4007);
2172
2173 bnx2_read_phy(bp, 0x10, &val);
2174 bnx2_write_phy(bp, 0x10, val & ~0x1);
2175 }
2176
5b0c76ad
MC
2177 /* ethernet@wirespeed */
2178 bnx2_write_phy(bp, 0x18, 0x7007);
2179 bnx2_read_phy(bp, 0x18, &val);
2180 bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
b6016b76
MC
2181 return 0;
2182}
2183
2184
2185static int
9a120bc5 2186bnx2_init_phy(struct bnx2 *bp, int reset_phy)
52d07b1f
HH
2187__releases(&bp->phy_lock)
2188__acquires(&bp->phy_lock)
b6016b76
MC
2189{
2190 u32 val;
2191 int rc = 0;
2192
583c28e5
MC
2193 bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2194 bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
b6016b76 2195
ca58c3af
MC
2196 bp->mii_bmcr = MII_BMCR;
2197 bp->mii_bmsr = MII_BMSR;
27a005b8 2198 bp->mii_bmsr1 = MII_BMSR;
ca58c3af
MC
2199 bp->mii_adv = MII_ADVERTISE;
2200 bp->mii_lpa = MII_LPA;
2201
b6016b76
MC
2202 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2203
583c28e5 2204 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
2205 goto setup_phy;
2206
b6016b76
MC
2207 bnx2_read_phy(bp, MII_PHYSID1, &val);
2208 bp->phy_id = val << 16;
2209 bnx2_read_phy(bp, MII_PHYSID2, &val);
2210 bp->phy_id |= val & 0xffff;
2211
583c28e5 2212 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
5b0c76ad 2213 if (CHIP_NUM(bp) == CHIP_NUM_5706)
9a120bc5 2214 rc = bnx2_init_5706s_phy(bp, reset_phy);
5b0c76ad 2215 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
9a120bc5 2216 rc = bnx2_init_5708s_phy(bp, reset_phy);
27a005b8 2217 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
9a120bc5 2218 rc = bnx2_init_5709s_phy(bp, reset_phy);
b6016b76
MC
2219 }
2220 else {
9a120bc5 2221 rc = bnx2_init_copper_phy(bp, reset_phy);
b6016b76
MC
2222 }
2223
0d8a6571
MC
2224setup_phy:
2225 if (!rc)
2226 rc = bnx2_setup_phy(bp, bp->phy_port);
b6016b76
MC
2227
2228 return rc;
2229}
2230
2231static int
2232bnx2_set_mac_loopback(struct bnx2 *bp)
2233{
2234 u32 mac_mode;
2235
2236 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2237 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2238 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2239 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2240 bp->link_up = 1;
2241 return 0;
2242}
2243
bc5a0690
MC
2244static int bnx2_test_link(struct bnx2 *);
2245
2246static int
2247bnx2_set_phy_loopback(struct bnx2 *bp)
2248{
2249 u32 mac_mode;
2250 int rc, i;
2251
2252 spin_lock_bh(&bp->phy_lock);
ca58c3af 2253 rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
bc5a0690
MC
2254 BMCR_SPEED1000);
2255 spin_unlock_bh(&bp->phy_lock);
2256 if (rc)
2257 return rc;
2258
2259 for (i = 0; i < 10; i++) {
2260 if (bnx2_test_link(bp) == 0)
2261 break;
80be4434 2262 msleep(100);
bc5a0690
MC
2263 }
2264
2265 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2266 mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2267 BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
59b47d8a 2268 BNX2_EMAC_MODE_25G_MODE);
bc5a0690
MC
2269
2270 mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2271 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2272 bp->link_up = 1;
2273 return 0;
2274}
2275
b6016b76 2276static int
a2f13890 2277bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
b6016b76
MC
2278{
2279 int i;
2280 u32 val;
2281
b6016b76
MC
2282 bp->fw_wr_seq++;
2283 msg_data |= bp->fw_wr_seq;
2284
2726d6e1 2285 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
b6016b76 2286
a2f13890
MC
2287 if (!ack)
2288 return 0;
2289
b6016b76 2290 /* wait for an acknowledgement. */
40105c0b 2291 for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
b090ae2b 2292 msleep(10);
b6016b76 2293
2726d6e1 2294 val = bnx2_shmem_rd(bp, BNX2_FW_MB);
b6016b76
MC
2295
2296 if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
2297 break;
2298 }
b090ae2b
MC
2299 if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
2300 return 0;
b6016b76
MC
2301
2302 /* If we timed out, inform the firmware that this is the case. */
b090ae2b
MC
2303 if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
2304 if (!silent)
2305 printk(KERN_ERR PFX "fw sync timeout, reset code = "
2306 "%x\n", msg_data);
b6016b76
MC
2307
2308 msg_data &= ~BNX2_DRV_MSG_CODE;
2309 msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;
2310
2726d6e1 2311 bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);
b6016b76 2312
b6016b76
MC
2313 return -EBUSY;
2314 }
2315
b090ae2b
MC
2316 if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
2317 return -EIO;
2318
b6016b76
MC
2319 return 0;
2320}
2321
59b47d8a
MC
2322static int
2323bnx2_init_5709_context(struct bnx2 *bp)
2324{
2325 int i, ret = 0;
2326 u32 val;
2327
2328 val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
2329 val |= (BCM_PAGE_BITS - 8) << 16;
2330 REG_WR(bp, BNX2_CTX_COMMAND, val);
641bdcd5
MC
2331 for (i = 0; i < 10; i++) {
2332 val = REG_RD(bp, BNX2_CTX_COMMAND);
2333 if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
2334 break;
2335 udelay(2);
2336 }
2337 if (val & BNX2_CTX_COMMAND_MEM_INIT)
2338 return -EBUSY;
2339
59b47d8a
MC
2340 for (i = 0; i < bp->ctx_pages; i++) {
2341 int j;
2342
352f7687
MC
2343 if (bp->ctx_blk[i])
2344 memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
2345 else
2346 return -ENOMEM;
2347
59b47d8a
MC
2348 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
2349 (bp->ctx_blk_mapping[i] & 0xffffffff) |
2350 BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
2351 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
2352 (u64) bp->ctx_blk_mapping[i] >> 32);
2353 REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
2354 BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
2355 for (j = 0; j < 10; j++) {
2356
2357 val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
2358 if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
2359 break;
2360 udelay(5);
2361 }
2362 if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
2363 ret = -EBUSY;
2364 break;
2365 }
2366 }
2367 return ret;
2368}
2369
b6016b76
MC
2370static void
2371bnx2_init_context(struct bnx2 *bp)
2372{
2373 u32 vcid;
2374
2375 vcid = 96;
2376 while (vcid) {
2377 u32 vcid_addr, pcid_addr, offset;
7947b20e 2378 int i;
b6016b76
MC
2379
2380 vcid--;
2381
2382 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
2383 u32 new_vcid;
2384
2385 vcid_addr = GET_PCID_ADDR(vcid);
2386 if (vcid & 0x8) {
2387 new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
2388 }
2389 else {
2390 new_vcid = vcid;
2391 }
2392 pcid_addr = GET_PCID_ADDR(new_vcid);
2393 }
2394 else {
2395 vcid_addr = GET_CID_ADDR(vcid);
2396 pcid_addr = vcid_addr;
2397 }
2398
7947b20e
MC
2399 for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
2400 vcid_addr += (i << PHY_CTX_SHIFT);
2401 pcid_addr += (i << PHY_CTX_SHIFT);
b6016b76 2402
5d5d0015 2403 REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
7947b20e 2404 REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);
b6016b76 2405
7947b20e
MC
2406 /* Zero out the context. */
2407 for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
62a8313c 2408 bnx2_ctx_wr(bp, vcid_addr, offset, 0);
7947b20e 2409 }
b6016b76
MC
2410 }
2411}
2412
2413static int
2414bnx2_alloc_bad_rbuf(struct bnx2 *bp)
2415{
2416 u16 *good_mbuf;
2417 u32 good_mbuf_cnt;
2418 u32 val;
2419
2420 good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
2421 if (good_mbuf == NULL) {
2422 printk(KERN_ERR PFX "Failed to allocate memory in "
2423 "bnx2_alloc_bad_rbuf\n");
2424 return -ENOMEM;
2425 }
2426
2427 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
2428 BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);
2429
2430 good_mbuf_cnt = 0;
2431
2432 /* Allocate a bunch of mbufs and save the good ones in an array. */
2726d6e1 2433 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76 2434 while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
2726d6e1
MC
2435 bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
2436 BNX2_RBUF_COMMAND_ALLOC_REQ);
b6016b76 2437
2726d6e1 2438 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);
b6016b76
MC
2439
2440 val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;
2441
2442 /* The addresses with Bit 9 set are bad memory blocks. */
2443 if (!(val & (1 << 9))) {
2444 good_mbuf[good_mbuf_cnt] = (u16) val;
2445 good_mbuf_cnt++;
2446 }
2447
2726d6e1 2448 val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
b6016b76
MC
2449 }
2450
2451 /* Free the good ones back to the mbuf pool thus discarding
2452 * all the bad ones. */
2453 while (good_mbuf_cnt) {
2454 good_mbuf_cnt--;
2455
2456 val = good_mbuf[good_mbuf_cnt];
2457 val = (val << 9) | val | 1;
2458
2726d6e1 2459 bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
b6016b76
MC
2460 }
2461 kfree(good_mbuf);
2462 return 0;
2463}
2464
2465static void
5fcaed01 2466bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2467{
2468 u32 val;
b6016b76
MC
2469
2470 val = (mac_addr[0] << 8) | mac_addr[1];
2471
5fcaed01 2472 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2473
6aa20a22 2474 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2475 (mac_addr[4] << 8) | mac_addr[5];
2476
5fcaed01 2477 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2478}
2479
47bf4246 2480static inline int
bb4f98ab 2481bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246
MC
2482{
2483 dma_addr_t mapping;
bb4f98ab 2484 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246 2485 struct rx_bd *rxbd =
bb4f98ab 2486 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
47bf4246
MC
2487 struct page *page = alloc_page(GFP_ATOMIC);
2488
2489 if (!page)
2490 return -ENOMEM;
2491 mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2492 PCI_DMA_FROMDEVICE);
3d16af86
BL
2493 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2494 __free_page(page);
2495 return -EIO;
2496 }
2497
47bf4246
MC
2498 rx_pg->page = page;
2499 pci_unmap_addr_set(rx_pg, mapping, mapping);
2500 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2501 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2502 return 0;
2503}
2504
2505static void
bb4f98ab 2506bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
47bf4246 2507{
bb4f98ab 2508 struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
47bf4246
MC
2509 struct page *page = rx_pg->page;
2510
2511 if (!page)
2512 return;
2513
2514 pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2515 PCI_DMA_FROMDEVICE);
2516
2517 __free_page(page);
2518 rx_pg->page = NULL;
2519}
2520
b6016b76 2521static inline int
bb4f98ab 2522bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
b6016b76
MC
2523{
2524 struct sk_buff *skb;
bb4f98ab 2525 struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
b6016b76 2526 dma_addr_t mapping;
bb4f98ab 2527 struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
b6016b76
MC
2528 unsigned long align;
2529
932f3772 2530 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
b6016b76
MC
2531 if (skb == NULL) {
2532 return -ENOMEM;
2533 }
2534
59b47d8a
MC
2535 if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
2536 skb_reserve(skb, BNX2_RX_ALIGN - align);
b6016b76 2537
b6016b76
MC
2538 mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
2539 PCI_DMA_FROMDEVICE);
3d16af86
BL
2540 if (pci_dma_mapping_error(bp->pdev, mapping)) {
2541 dev_kfree_skb(skb);
2542 return -EIO;
2543 }
b6016b76
MC
2544
2545 rx_buf->skb = skb;
2546 pci_unmap_addr_set(rx_buf, mapping, mapping);
2547
2548 rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2549 rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2550
bb4f98ab 2551 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76
MC
2552
2553 return 0;
2554}
2555
da3e4fbe 2556static int
35efa7c1 2557bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
b6016b76 2558{
43e80b89 2559 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76 2560 u32 new_link_state, old_link_state;
da3e4fbe 2561 int is_set = 1;
b6016b76 2562
da3e4fbe
MC
2563 new_link_state = sblk->status_attn_bits & event;
2564 old_link_state = sblk->status_attn_bits_ack & event;
b6016b76 2565 if (new_link_state != old_link_state) {
da3e4fbe
MC
2566 if (new_link_state)
2567 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2568 else
2569 REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2570 } else
2571 is_set = 0;
2572
2573 return is_set;
2574}
2575
2576static void
35efa7c1 2577bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
da3e4fbe 2578{
74ecc62d
MC
2579 spin_lock(&bp->phy_lock);
2580
2581 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
b6016b76 2582 bnx2_set_link(bp);
35efa7c1 2583 if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
0d8a6571
MC
2584 bnx2_set_remote_link(bp);
2585
74ecc62d
MC
2586 spin_unlock(&bp->phy_lock);
2587
b6016b76
MC
2588}
2589
ead7270b 2590static inline u16
35efa7c1 2591bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
ead7270b
MC
2592{
2593 u16 cons;
2594
43e80b89
MC
2595 /* Tell compiler that status block fields can change. */
2596 barrier();
2597 cons = *bnapi->hw_tx_cons_ptr;
ead7270b
MC
2598 if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
2599 cons++;
2600 return cons;
2601}
2602
57851d84
MC
2603static int
2604bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 2605{
35e9010b 2606 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
b6016b76 2607 u16 hw_cons, sw_cons, sw_ring_cons;
706bf240
BL
2608 int tx_pkt = 0, index;
2609 struct netdev_queue *txq;
2610
2611 index = (bnapi - bp->bnx2_napi);
2612 txq = netdev_get_tx_queue(bp->dev, index);
b6016b76 2613
35efa7c1 2614 hw_cons = bnx2_get_hw_tx_cons(bnapi);
35e9010b 2615 sw_cons = txr->tx_cons;
b6016b76
MC
2616
2617 while (sw_cons != hw_cons) {
3d16af86 2618 struct sw_tx_bd *tx_buf;
b6016b76
MC
2619 struct sk_buff *skb;
2620 int i, last;
2621
2622 sw_ring_cons = TX_RING_IDX(sw_cons);
2623
35e9010b 2624 tx_buf = &txr->tx_buf_ring[sw_ring_cons];
b6016b76 2625 skb = tx_buf->skb;
1d39ed56 2626
b6016b76 2627 /* partial BD completions possible with TSO packets */
89114afd 2628 if (skb_is_gso(skb)) {
b6016b76
MC
2629 u16 last_idx, last_ring_idx;
2630
2631 last_idx = sw_cons +
2632 skb_shinfo(skb)->nr_frags + 1;
2633 last_ring_idx = sw_ring_cons +
2634 skb_shinfo(skb)->nr_frags + 1;
2635 if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
2636 last_idx++;
2637 }
2638 if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
2639 break;
2640 }
2641 }
1d39ed56 2642
3d16af86 2643 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
b6016b76
MC
2644
2645 tx_buf->skb = NULL;
2646 last = skb_shinfo(skb)->nr_frags;
2647
2648 for (i = 0; i < last; i++) {
2649 sw_cons = NEXT_TX_BD(sw_cons);
b6016b76
MC
2650 }
2651
2652 sw_cons = NEXT_TX_BD(sw_cons);
2653
745720e5 2654 dev_kfree_skb(skb);
57851d84
MC
2655 tx_pkt++;
2656 if (tx_pkt == budget)
2657 break;
b6016b76 2658
35efa7c1 2659 hw_cons = bnx2_get_hw_tx_cons(bnapi);
b6016b76
MC
2660 }
2661
35e9010b
MC
2662 txr->hw_tx_cons = hw_cons;
2663 txr->tx_cons = sw_cons;
706bf240 2664
2f8af120 2665 /* Need to make the tx_cons update visible to bnx2_start_xmit()
706bf240 2666 * before checking for netif_tx_queue_stopped(). Without the
2f8af120
MC
2667 * memory barrier, there is a small possibility that bnx2_start_xmit()
2668 * will miss it and cause the queue to be stopped forever.
2669 */
2670 smp_mb();
b6016b76 2671
706bf240 2672 if (unlikely(netif_tx_queue_stopped(txq)) &&
35e9010b 2673 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
706bf240
BL
2674 __netif_tx_lock(txq, smp_processor_id());
2675 if ((netif_tx_queue_stopped(txq)) &&
35e9010b 2676 (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
706bf240
BL
2677 netif_tx_wake_queue(txq);
2678 __netif_tx_unlock(txq);
b6016b76 2679 }
706bf240 2680
57851d84 2681 return tx_pkt;
b6016b76
MC
2682}
2683
1db82f2a 2684static void
bb4f98ab 2685bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
a1f60190 2686 struct sk_buff *skb, int count)
1db82f2a
MC
2687{
2688 struct sw_pg *cons_rx_pg, *prod_rx_pg;
2689 struct rx_bd *cons_bd, *prod_bd;
1db82f2a 2690 int i;
3d16af86 2691 u16 hw_prod, prod;
bb4f98ab 2692 u16 cons = rxr->rx_pg_cons;
1db82f2a 2693
3d16af86
BL
2694 cons_rx_pg = &rxr->rx_pg_ring[cons];
2695
2696 /* The caller was unable to allocate a new page to replace the
2697 * last one in the frags array, so we need to recycle that page
2698 * and then free the skb.
2699 */
2700 if (skb) {
2701 struct page *page;
2702 struct skb_shared_info *shinfo;
2703
2704 shinfo = skb_shinfo(skb);
2705 shinfo->nr_frags--;
2706 page = shinfo->frags[shinfo->nr_frags].page;
2707 shinfo->frags[shinfo->nr_frags].page = NULL;
2708
2709 cons_rx_pg->page = page;
2710 dev_kfree_skb(skb);
2711 }
2712
2713 hw_prod = rxr->rx_pg_prod;
2714
1db82f2a
MC
2715 for (i = 0; i < count; i++) {
2716 prod = RX_PG_RING_IDX(hw_prod);
2717
bb4f98ab
MC
2718 prod_rx_pg = &rxr->rx_pg_ring[prod];
2719 cons_rx_pg = &rxr->rx_pg_ring[cons];
2720 cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2721 prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];
1db82f2a 2722
1db82f2a
MC
2723 if (prod != cons) {
2724 prod_rx_pg->page = cons_rx_pg->page;
2725 cons_rx_pg->page = NULL;
2726 pci_unmap_addr_set(prod_rx_pg, mapping,
2727 pci_unmap_addr(cons_rx_pg, mapping));
2728
2729 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2730 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
2731
2732 }
2733 cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
2734 hw_prod = NEXT_RX_BD(hw_prod);
2735 }
bb4f98ab
MC
2736 rxr->rx_pg_prod = hw_prod;
2737 rxr->rx_pg_cons = cons;
1db82f2a
MC
2738}
2739
b6016b76 2740static inline void
bb4f98ab
MC
2741bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
2742 struct sk_buff *skb, u16 cons, u16 prod)
b6016b76 2743{
236b6394
MC
2744 struct sw_bd *cons_rx_buf, *prod_rx_buf;
2745 struct rx_bd *cons_bd, *prod_bd;
2746
bb4f98ab
MC
2747 cons_rx_buf = &rxr->rx_buf_ring[cons];
2748 prod_rx_buf = &rxr->rx_buf_ring[prod];
b6016b76
MC
2749
2750 pci_dma_sync_single_for_device(bp->pdev,
2751 pci_unmap_addr(cons_rx_buf, mapping),
601d3d18 2752 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);
b6016b76 2753
bb4f98ab 2754 rxr->rx_prod_bseq += bp->rx_buf_use_size;
b6016b76 2755
236b6394 2756 prod_rx_buf->skb = skb;
b6016b76 2757
236b6394
MC
2758 if (cons == prod)
2759 return;
b6016b76 2760
236b6394
MC
2761 pci_unmap_addr_set(prod_rx_buf, mapping,
2762 pci_unmap_addr(cons_rx_buf, mapping));
2763
bb4f98ab
MC
2764 cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
2765 prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
236b6394
MC
2766 prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
2767 prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
b6016b76
MC
2768}
2769
85833c62 2770static int
bb4f98ab 2771bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
a1f60190
MC
2772 unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
2773 u32 ring_idx)
85833c62
MC
2774{
2775 int err;
2776 u16 prod = ring_idx & 0xffff;
2777
bb4f98ab 2778 err = bnx2_alloc_rx_skb(bp, rxr, prod);
85833c62 2779 if (unlikely(err)) {
bb4f98ab 2780 bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
1db82f2a
MC
2781 if (hdr_len) {
2782 unsigned int raw_len = len + 4;
2783 int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;
2784
bb4f98ab 2785 bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
1db82f2a 2786 }
85833c62
MC
2787 return err;
2788 }
2789
d89cb6af 2790 skb_reserve(skb, BNX2_RX_OFFSET);
85833c62
MC
2791 pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
2792 PCI_DMA_FROMDEVICE);
2793
1db82f2a
MC
2794 if (hdr_len == 0) {
2795 skb_put(skb, len);
2796 return 0;
2797 } else {
2798 unsigned int i, frag_len, frag_size, pages;
2799 struct sw_pg *rx_pg;
bb4f98ab
MC
2800 u16 pg_cons = rxr->rx_pg_cons;
2801 u16 pg_prod = rxr->rx_pg_prod;
1db82f2a
MC
2802
2803 frag_size = len + 4 - hdr_len;
2804 pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
2805 skb_put(skb, hdr_len);
2806
2807 for (i = 0; i < pages; i++) {
3d16af86
BL
2808 dma_addr_t mapping_old;
2809
1db82f2a
MC
2810 frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
2811 if (unlikely(frag_len <= 4)) {
2812 unsigned int tail = 4 - frag_len;
2813
bb4f98ab
MC
2814 rxr->rx_pg_cons = pg_cons;
2815 rxr->rx_pg_prod = pg_prod;
2816 bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
a1f60190 2817 pages - i);
1db82f2a
MC
2818 skb->len -= tail;
2819 if (i == 0) {
2820 skb->tail -= tail;
2821 } else {
2822 skb_frag_t *frag =
2823 &skb_shinfo(skb)->frags[i - 1];
2824 frag->size -= tail;
2825 skb->data_len -= tail;
2826 skb->truesize -= tail;
2827 }
2828 return 0;
2829 }
bb4f98ab 2830 rx_pg = &rxr->rx_pg_ring[pg_cons];
1db82f2a 2831
3d16af86
BL
2832 /* Don't unmap yet. If we're unable to allocate a new
2833 * page, we need to recycle the page and the DMA addr.
2834 */
2835 mapping_old = pci_unmap_addr(rx_pg, mapping);
1db82f2a
MC
2836 if (i == pages - 1)
2837 frag_len -= 4;
2838
2839 skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
2840 rx_pg->page = NULL;
2841
bb4f98ab
MC
2842 err = bnx2_alloc_rx_page(bp, rxr,
2843 RX_PG_RING_IDX(pg_prod));
1db82f2a 2844 if (unlikely(err)) {
bb4f98ab
MC
2845 rxr->rx_pg_cons = pg_cons;
2846 rxr->rx_pg_prod = pg_prod;
2847 bnx2_reuse_rx_skb_pages(bp, rxr, skb,
a1f60190 2848 pages - i);
1db82f2a
MC
2849 return err;
2850 }
2851
3d16af86
BL
2852 pci_unmap_page(bp->pdev, mapping_old,
2853 PAGE_SIZE, PCI_DMA_FROMDEVICE);
2854
1db82f2a
MC
2855 frag_size -= frag_len;
2856 skb->data_len += frag_len;
2857 skb->truesize += frag_len;
2858 skb->len += frag_len;
2859
2860 pg_prod = NEXT_RX_BD(pg_prod);
2861 pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
2862 }
bb4f98ab
MC
2863 rxr->rx_pg_prod = pg_prod;
2864 rxr->rx_pg_cons = pg_cons;
1db82f2a 2865 }
85833c62
MC
2866 return 0;
2867}
2868
c09c2627 2869static inline u16
35efa7c1 2870bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
c09c2627 2871{
bb4f98ab
MC
2872 u16 cons;
2873
43e80b89
MC
2874 /* Tell compiler that status block fields can change. */
2875 barrier();
2876 cons = *bnapi->hw_rx_cons_ptr;
c09c2627
MC
2877 if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
2878 cons++;
2879 return cons;
2880}
2881
b6016b76 2882static int
35efa7c1 2883bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
b6016b76 2884{
bb4f98ab 2885 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76
MC
2886 u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
2887 struct l2_fhdr *rx_hdr;
1db82f2a 2888 int rx_pkt = 0, pg_ring_used = 0;
b6016b76 2889
35efa7c1 2890 hw_cons = bnx2_get_hw_rx_cons(bnapi);
bb4f98ab
MC
2891 sw_cons = rxr->rx_cons;
2892 sw_prod = rxr->rx_prod;
b6016b76
MC
2893
2894 /* Memory barrier necessary as speculative reads of the rx
2895 * buffer can be ahead of the index in the status block
2896 */
2897 rmb();
2898 while (sw_cons != hw_cons) {
1db82f2a 2899 unsigned int len, hdr_len;
ade2bfe7 2900 u32 status;
b6016b76
MC
2901 struct sw_bd *rx_buf;
2902 struct sk_buff *skb;
236b6394 2903 dma_addr_t dma_addr;
f22828e8
MC
2904 u16 vtag = 0;
2905 int hw_vlan __maybe_unused = 0;
b6016b76
MC
2906
2907 sw_ring_cons = RX_RING_IDX(sw_cons);
2908 sw_ring_prod = RX_RING_IDX(sw_prod);
2909
bb4f98ab 2910 rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
b6016b76 2911 skb = rx_buf->skb;
236b6394
MC
2912
2913 rx_buf->skb = NULL;
2914
2915 dma_addr = pci_unmap_addr(rx_buf, mapping);
2916
2917 pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
601d3d18
BL
2918 BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
2919 PCI_DMA_FROMDEVICE);
b6016b76
MC
2920
2921 rx_hdr = (struct l2_fhdr *) skb->data;
1db82f2a 2922 len = rx_hdr->l2_fhdr_pkt_len;
b6016b76 2923
ade2bfe7 2924 if ((status = rx_hdr->l2_fhdr_status) &
b6016b76
MC
2925 (L2_FHDR_ERRORS_BAD_CRC |
2926 L2_FHDR_ERRORS_PHY_DECODE |
2927 L2_FHDR_ERRORS_ALIGNMENT |
2928 L2_FHDR_ERRORS_TOO_SHORT |
2929 L2_FHDR_ERRORS_GIANT_FRAME)) {
2930
bb4f98ab 2931 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
a1f60190 2932 sw_ring_prod);
85833c62 2933 goto next_rx;
b6016b76 2934 }
1db82f2a
MC
2935 hdr_len = 0;
2936 if (status & L2_FHDR_STATUS_SPLIT) {
2937 hdr_len = rx_hdr->l2_fhdr_ip_xsum;
2938 pg_ring_used = 1;
2939 } else if (len > bp->rx_jumbo_thresh) {
2940 hdr_len = bp->rx_jumbo_thresh;
2941 pg_ring_used = 1;
2942 }
2943
2944 len -= 4;
b6016b76 2945
5d5d0015 2946 if (len <= bp->rx_copy_thresh) {
b6016b76
MC
2947 struct sk_buff *new_skb;
2948
f22828e8 2949 new_skb = netdev_alloc_skb(bp->dev, len + 6);
85833c62 2950 if (new_skb == NULL) {
bb4f98ab 2951 bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
85833c62
MC
2952 sw_ring_prod);
2953 goto next_rx;
2954 }
b6016b76
MC
2955
2956 /* aligned copy */
d89cb6af 2957 skb_copy_from_linear_data_offset(skb,
f22828e8
MC
2958 BNX2_RX_OFFSET - 6,
2959 new_skb->data, len + 6);
2960 skb_reserve(new_skb, 6);
b6016b76 2961 skb_put(new_skb, len);
b6016b76 2962
bb4f98ab 2963 bnx2_reuse_rx_skb(bp, rxr, skb,
b6016b76
MC
2964 sw_ring_cons, sw_ring_prod);
2965
2966 skb = new_skb;
bb4f98ab 2967 } else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
a1f60190 2968 dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
b6016b76 2969 goto next_rx;
b6016b76 2970
f22828e8
MC
2971 if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
2972 !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
2973 vtag = rx_hdr->l2_fhdr_vlan_tag;
2974#ifdef BCM_VLAN
2975 if (bp->vlgrp)
2976 hw_vlan = 1;
2977 else
2978#endif
2979 {
2980 struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
2981 __skb_push(skb, 4);
2982
2983 memmove(ve, skb->data + 4, ETH_ALEN * 2);
2984 ve->h_vlan_proto = htons(ETH_P_8021Q);
2985 ve->h_vlan_TCI = htons(vtag);
2986 len += 4;
2987 }
2988 }
2989
b6016b76
MC
2990 skb->protocol = eth_type_trans(skb, bp->dev);
2991
2992 if ((len > (bp->dev->mtu + ETH_HLEN)) &&
d1e100ba 2993 (ntohs(skb->protocol) != 0x8100)) {
b6016b76 2994
745720e5 2995 dev_kfree_skb(skb);
b6016b76
MC
2996 goto next_rx;
2997
2998 }
2999
b6016b76
MC
3000 skb->ip_summed = CHECKSUM_NONE;
3001 if (bp->rx_csum &&
3002 (status & (L2_FHDR_STATUS_TCP_SEGMENT |
3003 L2_FHDR_STATUS_UDP_DATAGRAM))) {
3004
ade2bfe7
MC
3005 if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
3006 L2_FHDR_ERRORS_UDP_XSUM)) == 0))
b6016b76
MC
3007 skb->ip_summed = CHECKSUM_UNNECESSARY;
3008 }
3009
3010#ifdef BCM_VLAN
f22828e8
MC
3011 if (hw_vlan)
3012 vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
b6016b76
MC
3013 else
3014#endif
3015 netif_receive_skb(skb);
3016
b6016b76
MC
3017 rx_pkt++;
3018
3019next_rx:
b6016b76
MC
3020 sw_cons = NEXT_RX_BD(sw_cons);
3021 sw_prod = NEXT_RX_BD(sw_prod);
3022
3023 if ((rx_pkt == budget))
3024 break;
f4e418f7
MC
3025
3026 /* Refresh hw_cons to see if there is new work */
3027 if (sw_cons == hw_cons) {
35efa7c1 3028 hw_cons = bnx2_get_hw_rx_cons(bnapi);
f4e418f7
MC
3029 rmb();
3030 }
b6016b76 3031 }
bb4f98ab
MC
3032 rxr->rx_cons = sw_cons;
3033 rxr->rx_prod = sw_prod;
b6016b76 3034
1db82f2a 3035 if (pg_ring_used)
bb4f98ab 3036 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
1db82f2a 3037
bb4f98ab 3038 REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);
b6016b76 3039
bb4f98ab 3040 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
3041
3042 mmiowb();
3043
3044 return rx_pkt;
3045
3046}
3047
3048/* MSI ISR - The only difference between this and the INTx ISR
3049 * is that the MSI interrupt is always serviced.
3050 */
3051static irqreturn_t
7d12e780 3052bnx2_msi(int irq, void *dev_instance)
b6016b76 3053{
f0ea2e63
MC
3054 struct bnx2_napi *bnapi = dev_instance;
3055 struct bnx2 *bp = bnapi->bp;
b6016b76 3056
43e80b89 3057 prefetch(bnapi->status_blk.msi);
b6016b76
MC
3058 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3059 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3060 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3061
3062 /* Return here if interrupt is disabled. */
73eef4cd
MC
3063 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3064 return IRQ_HANDLED;
b6016b76 3065
288379f0 3066 napi_schedule(&bnapi->napi);
b6016b76 3067
73eef4cd 3068 return IRQ_HANDLED;
b6016b76
MC
3069}
3070
8e6a72c4
MC
3071static irqreturn_t
3072bnx2_msi_1shot(int irq, void *dev_instance)
3073{
f0ea2e63
MC
3074 struct bnx2_napi *bnapi = dev_instance;
3075 struct bnx2 *bp = bnapi->bp;
8e6a72c4 3076
43e80b89 3077 prefetch(bnapi->status_blk.msi);
8e6a72c4
MC
3078
3079 /* Return here if interrupt is disabled. */
3080 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3081 return IRQ_HANDLED;
3082
288379f0 3083 napi_schedule(&bnapi->napi);
8e6a72c4
MC
3084
3085 return IRQ_HANDLED;
3086}
3087
b6016b76 3088static irqreturn_t
7d12e780 3089bnx2_interrupt(int irq, void *dev_instance)
b6016b76 3090{
f0ea2e63
MC
3091 struct bnx2_napi *bnapi = dev_instance;
3092 struct bnx2 *bp = bnapi->bp;
43e80b89 3093 struct status_block *sblk = bnapi->status_blk.msi;
b6016b76
MC
3094
3095 /* When using INTx, it is possible for the interrupt to arrive
3096 * at the CPU before the status block posted prior to the
3097 * interrupt. Reading a register will flush the status block.
3098 * When using MSI, the MSI message will always complete after
3099 * the status block write.
3100 */
35efa7c1 3101 if ((sblk->status_idx == bnapi->last_status_idx) &&
b6016b76
MC
3102 (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
3103 BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
73eef4cd 3104 return IRQ_NONE;
b6016b76
MC
3105
3106 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3107 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3108 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3109
b8a7ce7b
MC
3110 /* Read back to deassert IRQ immediately to avoid too many
3111 * spurious interrupts.
3112 */
3113 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
3114
b6016b76 3115 /* Return here if interrupt is shared and is disabled. */
73eef4cd
MC
3116 if (unlikely(atomic_read(&bp->intr_sem) != 0))
3117 return IRQ_HANDLED;
b6016b76 3118
288379f0 3119 if (napi_schedule_prep(&bnapi->napi)) {
35efa7c1 3120 bnapi->last_status_idx = sblk->status_idx;
288379f0 3121 __napi_schedule(&bnapi->napi);
b8a7ce7b 3122 }
b6016b76 3123
73eef4cd 3124 return IRQ_HANDLED;
b6016b76
MC
3125}
3126
f4e418f7 3127static inline int
43e80b89 3128bnx2_has_fast_work(struct bnx2_napi *bnapi)
f4e418f7 3129{
35e9010b 3130 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 3131 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
f4e418f7 3132
bb4f98ab 3133 if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
35e9010b 3134 (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
f4e418f7 3135 return 1;
43e80b89
MC
3136 return 0;
3137}
3138
3139#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3140 STATUS_ATTN_BITS_TIMER_ABORT)
3141
3142static inline int
3143bnx2_has_work(struct bnx2_napi *bnapi)
3144{
3145 struct status_block *sblk = bnapi->status_blk.msi;
3146
3147 if (bnx2_has_fast_work(bnapi))
3148 return 1;
f4e418f7 3149
da3e4fbe
MC
3150 if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3151 (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
f4e418f7
MC
3152 return 1;
3153
3154 return 0;
3155}
3156
efba0180
MC
3157static void
3158bnx2_chk_missed_msi(struct bnx2 *bp)
3159{
3160 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
3161 u32 msi_ctrl;
3162
3163 if (bnx2_has_work(bnapi)) {
3164 msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
3165 if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
3166 return;
3167
3168 if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
3169 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
3170 ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
3171 REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
3172 bnx2_msi(bp->irq_tbl[0].vector, bnapi);
3173 }
3174 }
3175
3176 bp->idle_chk_status_idx = bnapi->last_status_idx;
3177}
3178
43e80b89 3179static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
b6016b76 3180{
43e80b89 3181 struct status_block *sblk = bnapi->status_blk.msi;
da3e4fbe
MC
3182 u32 status_attn_bits = sblk->status_attn_bits;
3183 u32 status_attn_bits_ack = sblk->status_attn_bits_ack;
b6016b76 3184
da3e4fbe
MC
3185 if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
3186 (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {
b6016b76 3187
35efa7c1 3188 bnx2_phy_int(bp, bnapi);
bf5295bb
MC
3189
3190 /* This is needed to take care of transient status
3191 * during link changes.
3192 */
3193 REG_WR(bp, BNX2_HC_COMMAND,
3194 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
3195 REG_RD(bp, BNX2_HC_COMMAND);
b6016b76 3196 }
43e80b89
MC
3197}
3198
3199static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3200 int work_done, int budget)
3201{
3202 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3203 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
b6016b76 3204
35e9010b 3205 if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
57851d84 3206 bnx2_tx_int(bp, bnapi, 0);
b6016b76 3207
bb4f98ab 3208 if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
35efa7c1 3209 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
6aa20a22 3210
6f535763
DM
3211 return work_done;
3212}
3213
f0ea2e63
MC
3214static int bnx2_poll_msix(struct napi_struct *napi, int budget)
3215{
3216 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3217 struct bnx2 *bp = bnapi->bp;
3218 int work_done = 0;
3219 struct status_block_msix *sblk = bnapi->status_blk.msix;
3220
3221 while (1) {
3222 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
3223 if (unlikely(work_done >= budget))
3224 break;
3225
3226 bnapi->last_status_idx = sblk->status_idx;
3227 /* status idx must be read before checking for more work. */
3228 rmb();
3229 if (likely(!bnx2_has_fast_work(bnapi))) {
3230
288379f0 3231 napi_complete(napi);
f0ea2e63
MC
3232 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
3233 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
3234 bnapi->last_status_idx);
3235 break;
3236 }
3237 }
3238 return work_done;
3239}
3240
6f535763
DM
3241static int bnx2_poll(struct napi_struct *napi, int budget)
3242{
35efa7c1
MC
3243 struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
3244 struct bnx2 *bp = bnapi->bp;
6f535763 3245 int work_done = 0;
43e80b89 3246 struct status_block *sblk = bnapi->status_blk.msi;
6f535763
DM
3247
3248 while (1) {
43e80b89
MC
3249 bnx2_poll_link(bp, bnapi);
3250
35efa7c1 3251 work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
f4e418f7 3252
35efa7c1 3253 /* bnapi->last_status_idx is used below to tell the hw how
6dee6421
MC
3254 * much work has been processed, so we must read it before
3255 * checking for more work.
3256 */
35efa7c1 3257 bnapi->last_status_idx = sblk->status_idx;
efba0180
MC
3258
3259 if (unlikely(work_done >= budget))
3260 break;
3261
6dee6421 3262 rmb();
35efa7c1 3263 if (likely(!bnx2_has_work(bnapi))) {
288379f0 3264 napi_complete(napi);
f86e82fb 3265 if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
6f535763
DM
3266 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3267 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
35efa7c1 3268 bnapi->last_status_idx);
6dee6421 3269 break;
6f535763 3270 }
1269a8a6
MC
3271 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3272 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
6f535763 3273 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
35efa7c1 3274 bnapi->last_status_idx);
1269a8a6 3275
6f535763
DM
3276 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3277 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
35efa7c1 3278 bnapi->last_status_idx);
6f535763
DM
3279 break;
3280 }
b6016b76
MC
3281 }
3282
bea3348e 3283 return work_done;
b6016b76
MC
3284}
3285
932ff279 3286/* Called with rtnl_lock from vlan functions and also netif_tx_lock
b6016b76
MC
3287 * from set_multicast.
3288 */
3289static void
3290bnx2_set_rx_mode(struct net_device *dev)
3291{
972ec0d4 3292 struct bnx2 *bp = netdev_priv(dev);
b6016b76 3293 u32 rx_mode, sort_mode;
5fcaed01 3294 struct dev_addr_list *uc_ptr;
b6016b76 3295 int i;
b6016b76 3296
9f52b564
MC
3297 if (!netif_running(dev))
3298 return;
3299
c770a65c 3300 spin_lock_bh(&bp->phy_lock);
b6016b76
MC
3301
3302 rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
3303 BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
3304 sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
3305#ifdef BCM_VLAN
7c6337a1 3306 if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
b6016b76 3307 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76 3308#else
7c6337a1 3309 if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
e29054f9 3310 rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
b6016b76
MC
3311#endif
3312 if (dev->flags & IFF_PROMISC) {
3313 /* Promiscuous mode. */
3314 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
7510873d
MC
3315 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3316 BNX2_RPM_SORT_USER0_PROM_VLAN;
b6016b76
MC
3317 }
3318 else if (dev->flags & IFF_ALLMULTI) {
3319 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3320 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3321 0xffffffff);
3322 }
3323 sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
3324 }
3325 else {
3326 /* Accept one or more multicast(s). */
3327 struct dev_mc_list *mclist;
3328 u32 mc_filter[NUM_MC_HASH_REGISTERS];
3329 u32 regidx;
3330 u32 bit;
3331 u32 crc;
3332
3333 memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);
3334
3335 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
3336 i++, mclist = mclist->next) {
3337
3338 crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
3339 bit = crc & 0xff;
3340 regidx = (bit & 0xe0) >> 5;
3341 bit &= 0x1f;
3342 mc_filter[regidx] |= (1 << bit);
3343 }
3344
3345 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3346 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3347 mc_filter[i]);
3348 }
3349
3350 sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
3351 }
3352
5fcaed01
BL
3353 uc_ptr = NULL;
3354 if (dev->uc_count > BNX2_MAX_UNICAST_ADDRESSES) {
3355 rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
3356 sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
3357 BNX2_RPM_SORT_USER0_PROM_VLAN;
3358 } else if (!(dev->flags & IFF_PROMISC)) {
3359 uc_ptr = dev->uc_list;
3360
3361 /* Add all entries into to the match filter list */
3362 for (i = 0; i < dev->uc_count; i++) {
3363 bnx2_set_mac_addr(bp, uc_ptr->da_addr,
3364 i + BNX2_START_UNICAST_ADDRESS_INDEX);
3365 sort_mode |= (1 <<
3366 (i + BNX2_START_UNICAST_ADDRESS_INDEX));
3367 uc_ptr = uc_ptr->next;
3368 }
3369
3370 }
3371
b6016b76
MC
3372 if (rx_mode != bp->rx_mode) {
3373 bp->rx_mode = rx_mode;
3374 REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
3375 }
3376
3377 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3378 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
3379 REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);
3380
c770a65c 3381 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3382}
3383
3384static void
b491edd5 3385load_rv2p_fw(struct bnx2 *bp, __le32 *rv2p_code, u32 rv2p_code_len,
b6016b76
MC
3386 u32 rv2p_proc)
3387{
3388 int i;
3389 u32 val;
3390
d25be1d3
MC
3391 if (rv2p_proc == RV2P_PROC2 && CHIP_NUM(bp) == CHIP_NUM_5709) {
3392 val = le32_to_cpu(rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC]);
3393 val &= ~XI_RV2P_PROC2_BD_PAGE_SIZE_MSK;
3394 val |= XI_RV2P_PROC2_BD_PAGE_SIZE;
3395 rv2p_code[XI_RV2P_PROC2_MAX_BD_PAGE_LOC] = cpu_to_le32(val);
3396 }
b6016b76
MC
3397
3398 for (i = 0; i < rv2p_code_len; i += 8) {
b491edd5 3399 REG_WR(bp, BNX2_RV2P_INSTR_HIGH, le32_to_cpu(*rv2p_code));
b6016b76 3400 rv2p_code++;
b491edd5 3401 REG_WR(bp, BNX2_RV2P_INSTR_LOW, le32_to_cpu(*rv2p_code));
b6016b76
MC
3402 rv2p_code++;
3403
3404 if (rv2p_proc == RV2P_PROC1) {
3405 val = (i / 8) | BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
3406 REG_WR(bp, BNX2_RV2P_PROC1_ADDR_CMD, val);
3407 }
3408 else {
3409 val = (i / 8) | BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
3410 REG_WR(bp, BNX2_RV2P_PROC2_ADDR_CMD, val);
3411 }
3412 }
3413
3414 /* Reset the processor, un-stall is done later. */
3415 if (rv2p_proc == RV2P_PROC1) {
3416 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
3417 }
3418 else {
3419 REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
3420 }
3421}
3422
af3ee519 3423static int
10343cca 3424load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg, struct fw_info *fw)
b6016b76
MC
3425{
3426 u32 offset;
3427 u32 val;
af3ee519 3428 int rc;
b6016b76
MC
3429
3430 /* Halt the CPU. */
2726d6e1 3431 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3432 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3433 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3434 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3435
3436 /* Load the Text area. */
3437 offset = cpu_reg->spad_base + (fw->text_addr - cpu_reg->mips_view_base);
af3ee519 3438 if (fw->gz_text) {
b6016b76
MC
3439 int j;
3440
ea1f8d5c
MC
3441 rc = zlib_inflate_blob(fw->text, FW_BUF_SIZE, fw->gz_text,
3442 fw->gz_text_len);
3443 if (rc < 0)
b3448b0b 3444 return rc;
ea1f8d5c 3445
b6016b76 3446 for (j = 0; j < (fw->text_len / 4); j++, offset += 4) {
2726d6e1 3447 bnx2_reg_wr_ind(bp, offset, le32_to_cpu(fw->text[j]));
b6016b76
MC
3448 }
3449 }
3450
3451 /* Load the Data area. */
3452 offset = cpu_reg->spad_base + (fw->data_addr - cpu_reg->mips_view_base);
3453 if (fw->data) {
3454 int j;
3455
3456 for (j = 0; j < (fw->data_len / 4); j++, offset += 4) {
2726d6e1 3457 bnx2_reg_wr_ind(bp, offset, fw->data[j]);
b6016b76
MC
3458 }
3459 }
3460
3461 /* Load the SBSS area. */
3462 offset = cpu_reg->spad_base + (fw->sbss_addr - cpu_reg->mips_view_base);
ea1f8d5c 3463 if (fw->sbss_len) {
b6016b76
MC
3464 int j;
3465
3466 for (j = 0; j < (fw->sbss_len / 4); j++, offset += 4) {
2726d6e1 3467 bnx2_reg_wr_ind(bp, offset, 0);
b6016b76
MC
3468 }
3469 }
3470
3471 /* Load the BSS area. */
3472 offset = cpu_reg->spad_base + (fw->bss_addr - cpu_reg->mips_view_base);
ea1f8d5c 3473 if (fw->bss_len) {
b6016b76
MC
3474 int j;
3475
3476 for (j = 0; j < (fw->bss_len/4); j++, offset += 4) {
2726d6e1 3477 bnx2_reg_wr_ind(bp, offset, 0);
b6016b76
MC
3478 }
3479 }
3480
3481 /* Load the Read-Only area. */
3482 offset = cpu_reg->spad_base +
3483 (fw->rodata_addr - cpu_reg->mips_view_base);
3484 if (fw->rodata) {
3485 int j;
3486
3487 for (j = 0; j < (fw->rodata_len / 4); j++, offset += 4) {
2726d6e1 3488 bnx2_reg_wr_ind(bp, offset, fw->rodata[j]);
b6016b76
MC
3489 }
3490 }
3491
3492 /* Clear the pre-fetch instruction. */
2726d6e1
MC
3493 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3494 bnx2_reg_wr_ind(bp, cpu_reg->pc, fw->start_addr);
b6016b76
MC
3495
3496 /* Start the CPU. */
2726d6e1 3497 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3498 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3499 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3500 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3501
3502 return 0;
b6016b76
MC
3503}
3504
fba9fe91 3505static int
b6016b76
MC
3506bnx2_init_cpus(struct bnx2 *bp)
3507{
af3ee519 3508 struct fw_info *fw;
110d0ef9
MC
3509 int rc, rv2p_len;
3510 void *text, *rv2p;
b6016b76
MC
3511
3512 /* Initialize the RV2P processor. */
b3448b0b
DV
3513 text = vmalloc(FW_BUF_SIZE);
3514 if (!text)
3515 return -ENOMEM;
110d0ef9
MC
3516 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3517 rv2p = bnx2_xi_rv2p_proc1;
3518 rv2p_len = sizeof(bnx2_xi_rv2p_proc1);
3519 } else {
3520 rv2p = bnx2_rv2p_proc1;
3521 rv2p_len = sizeof(bnx2_rv2p_proc1);
3522 }
3523 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3524 if (rc < 0)
fba9fe91 3525 goto init_cpu_err;
ea1f8d5c 3526
b3448b0b 3527 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC1);
fba9fe91 3528
110d0ef9
MC
3529 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3530 rv2p = bnx2_xi_rv2p_proc2;
3531 rv2p_len = sizeof(bnx2_xi_rv2p_proc2);
3532 } else {
3533 rv2p = bnx2_rv2p_proc2;
3534 rv2p_len = sizeof(bnx2_rv2p_proc2);
3535 }
3536 rc = zlib_inflate_blob(text, FW_BUF_SIZE, rv2p, rv2p_len);
ea1f8d5c 3537 if (rc < 0)
fba9fe91 3538 goto init_cpu_err;
ea1f8d5c 3539
b3448b0b 3540 load_rv2p_fw(bp, text, rc /* == len */, RV2P_PROC2);
b6016b76
MC
3541
3542 /* Initialize the RX Processor. */
d43584c8
MC
3543 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3544 fw = &bnx2_rxp_fw_09;
3545 else
3546 fw = &bnx2_rxp_fw_06;
fba9fe91 3547
ea1f8d5c 3548 fw->text = text;
10343cca 3549 rc = load_cpu_fw(bp, &cpu_reg_rxp, fw);
fba9fe91
MC
3550 if (rc)
3551 goto init_cpu_err;
3552
b6016b76 3553 /* Initialize the TX Processor. */
d43584c8
MC
3554 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3555 fw = &bnx2_txp_fw_09;
3556 else
3557 fw = &bnx2_txp_fw_06;
fba9fe91 3558
ea1f8d5c 3559 fw->text = text;
10343cca 3560 rc = load_cpu_fw(bp, &cpu_reg_txp, fw);
fba9fe91
MC
3561 if (rc)
3562 goto init_cpu_err;
3563
b6016b76 3564 /* Initialize the TX Patch-up Processor. */
d43584c8
MC
3565 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3566 fw = &bnx2_tpat_fw_09;
3567 else
3568 fw = &bnx2_tpat_fw_06;
fba9fe91 3569
ea1f8d5c 3570 fw->text = text;
10343cca 3571 rc = load_cpu_fw(bp, &cpu_reg_tpat, fw);
fba9fe91
MC
3572 if (rc)
3573 goto init_cpu_err;
3574
b6016b76 3575 /* Initialize the Completion Processor. */
d43584c8
MC
3576 if (CHIP_NUM(bp) == CHIP_NUM_5709)
3577 fw = &bnx2_com_fw_09;
3578 else
3579 fw = &bnx2_com_fw_06;
fba9fe91 3580
ea1f8d5c 3581 fw->text = text;
10343cca 3582 rc = load_cpu_fw(bp, &cpu_reg_com, fw);
fba9fe91
MC
3583 if (rc)
3584 goto init_cpu_err;
3585
d43584c8 3586 /* Initialize the Command Processor. */
110d0ef9 3587 if (CHIP_NUM(bp) == CHIP_NUM_5709)
d43584c8 3588 fw = &bnx2_cp_fw_09;
110d0ef9
MC
3589 else
3590 fw = &bnx2_cp_fw_06;
3591
3592 fw->text = text;
10343cca 3593 rc = load_cpu_fw(bp, &cpu_reg_cp, fw);
b6016b76 3594
fba9fe91 3595init_cpu_err:
ea1f8d5c 3596 vfree(text);
fba9fe91 3597 return rc;
b6016b76
MC
3598}
3599
3600static int
829ca9a3 3601bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
b6016b76
MC
3602{
3603 u16 pmcsr;
3604
3605 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
3606
3607 switch (state) {
829ca9a3 3608 case PCI_D0: {
b6016b76
MC
3609 u32 val;
3610
3611 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3612 (pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
3613 PCI_PM_CTRL_PME_STATUS);
3614
3615 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
3616 /* delay required during transition out of D3hot */
3617 msleep(20);
3618
3619 val = REG_RD(bp, BNX2_EMAC_MODE);
3620 val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
3621 val &= ~BNX2_EMAC_MODE_MPKT;
3622 REG_WR(bp, BNX2_EMAC_MODE, val);
3623
3624 val = REG_RD(bp, BNX2_RPM_CONFIG);
3625 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3626 REG_WR(bp, BNX2_RPM_CONFIG, val);
3627 break;
3628 }
829ca9a3 3629 case PCI_D3hot: {
b6016b76
MC
3630 int i;
3631 u32 val, wol_msg;
3632
3633 if (bp->wol) {
3634 u32 advertising;
3635 u8 autoneg;
3636
3637 autoneg = bp->autoneg;
3638 advertising = bp->advertising;
3639
239cd343
MC
3640 if (bp->phy_port == PORT_TP) {
3641 bp->autoneg = AUTONEG_SPEED;
3642 bp->advertising = ADVERTISED_10baseT_Half |
3643 ADVERTISED_10baseT_Full |
3644 ADVERTISED_100baseT_Half |
3645 ADVERTISED_100baseT_Full |
3646 ADVERTISED_Autoneg;
3647 }
b6016b76 3648
239cd343
MC
3649 spin_lock_bh(&bp->phy_lock);
3650 bnx2_setup_phy(bp, bp->phy_port);
3651 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
3652
3653 bp->autoneg = autoneg;
3654 bp->advertising = advertising;
3655
5fcaed01 3656 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
3657
3658 val = REG_RD(bp, BNX2_EMAC_MODE);
3659
3660 /* Enable port mode. */
3661 val &= ~BNX2_EMAC_MODE_PORT;
239cd343 3662 val |= BNX2_EMAC_MODE_MPKT_RCVD |
b6016b76 3663 BNX2_EMAC_MODE_ACPI_RCVD |
b6016b76 3664 BNX2_EMAC_MODE_MPKT;
239cd343
MC
3665 if (bp->phy_port == PORT_TP)
3666 val |= BNX2_EMAC_MODE_PORT_MII;
3667 else {
3668 val |= BNX2_EMAC_MODE_PORT_GMII;
3669 if (bp->line_speed == SPEED_2500)
3670 val |= BNX2_EMAC_MODE_25G_MODE;
3671 }
b6016b76
MC
3672
3673 REG_WR(bp, BNX2_EMAC_MODE, val);
3674
3675 /* receive all multicast */
3676 for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
3677 REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
3678 0xffffffff);
3679 }
3680 REG_WR(bp, BNX2_EMAC_RX_MODE,
3681 BNX2_EMAC_RX_MODE_SORT_MODE);
3682
3683 val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
3684 BNX2_RPM_SORT_USER0_MC_EN;
3685 REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
3686 REG_WR(bp, BNX2_RPM_SORT_USER0, val);
3687 REG_WR(bp, BNX2_RPM_SORT_USER0, val |
3688 BNX2_RPM_SORT_USER0_ENA);
3689
3690 /* Need to enable EMAC and RPM for WOL. */
3691 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
3692 BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
3693 BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
3694 BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);
3695
3696 val = REG_RD(bp, BNX2_RPM_CONFIG);
3697 val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
3698 REG_WR(bp, BNX2_RPM_CONFIG, val);
3699
3700 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
3701 }
3702 else {
3703 wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
3704 }
3705
f86e82fb 3706 if (!(bp->flags & BNX2_FLAG_NO_WOL))
a2f13890
MC
3707 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
3708 1, 0);
b6016b76
MC
3709
3710 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
3711 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
3712 (CHIP_ID(bp) == CHIP_ID_5706_A1)) {
3713
3714 if (bp->wol)
3715 pmcsr |= 3;
3716 }
3717 else {
3718 pmcsr |= 3;
3719 }
3720 if (bp->wol) {
3721 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
3722 }
3723 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
3724 pmcsr);
3725
3726 /* No more memory access after this point until
3727 * device is brought back to D0.
3728 */
3729 udelay(50);
3730 break;
3731 }
3732 default:
3733 return -EINVAL;
3734 }
3735 return 0;
3736}
3737
3738static int
3739bnx2_acquire_nvram_lock(struct bnx2 *bp)
3740{
3741 u32 val;
3742 int j;
3743
3744 /* Request access to the flash interface. */
3745 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
3746 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3747 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3748 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
3749 break;
3750
3751 udelay(5);
3752 }
3753
3754 if (j >= NVRAM_TIMEOUT_COUNT)
3755 return -EBUSY;
3756
3757 return 0;
3758}
3759
3760static int
3761bnx2_release_nvram_lock(struct bnx2 *bp)
3762{
3763 int j;
3764 u32 val;
3765
3766 /* Relinquish nvram interface. */
3767 REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
3768
3769 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3770 val = REG_RD(bp, BNX2_NVM_SW_ARB);
3771 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
3772 break;
3773
3774 udelay(5);
3775 }
3776
3777 if (j >= NVRAM_TIMEOUT_COUNT)
3778 return -EBUSY;
3779
3780 return 0;
3781}
3782
3783
3784static int
3785bnx2_enable_nvram_write(struct bnx2 *bp)
3786{
3787 u32 val;
3788
3789 val = REG_RD(bp, BNX2_MISC_CFG);
3790 REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
3791
e30372c9 3792 if (bp->flash_info->flags & BNX2_NV_WREN) {
b6016b76
MC
3793 int j;
3794
3795 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3796 REG_WR(bp, BNX2_NVM_COMMAND,
3797 BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
3798
3799 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3800 udelay(5);
3801
3802 val = REG_RD(bp, BNX2_NVM_COMMAND);
3803 if (val & BNX2_NVM_COMMAND_DONE)
3804 break;
3805 }
3806
3807 if (j >= NVRAM_TIMEOUT_COUNT)
3808 return -EBUSY;
3809 }
3810 return 0;
3811}
3812
3813static void
3814bnx2_disable_nvram_write(struct bnx2 *bp)
3815{
3816 u32 val;
3817
3818 val = REG_RD(bp, BNX2_MISC_CFG);
3819 REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
3820}
3821
3822
3823static void
3824bnx2_enable_nvram_access(struct bnx2 *bp)
3825{
3826 u32 val;
3827
3828 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3829 /* Enable both bits, even on read. */
6aa20a22 3830 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3831 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
3832}
3833
3834static void
3835bnx2_disable_nvram_access(struct bnx2 *bp)
3836{
3837 u32 val;
3838
3839 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
3840 /* Disable both bits, even after read. */
6aa20a22 3841 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
3842 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
3843 BNX2_NVM_ACCESS_ENABLE_WR_EN));
3844}
3845
3846static int
3847bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
3848{
3849 u32 cmd;
3850 int j;
3851
e30372c9 3852 if (bp->flash_info->flags & BNX2_NV_BUFFERED)
b6016b76
MC
3853 /* Buffered flash, no erase needed */
3854 return 0;
3855
3856 /* Build an erase command */
3857 cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
3858 BNX2_NVM_COMMAND_DOIT;
3859
3860 /* Need to clear DONE bit separately. */
3861 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3862
3863 /* Address of the NVRAM to read from. */
3864 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3865
3866 /* Issue an erase command. */
3867 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3868
3869 /* Wait for completion. */
3870 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3871 u32 val;
3872
3873 udelay(5);
3874
3875 val = REG_RD(bp, BNX2_NVM_COMMAND);
3876 if (val & BNX2_NVM_COMMAND_DONE)
3877 break;
3878 }
3879
3880 if (j >= NVRAM_TIMEOUT_COUNT)
3881 return -EBUSY;
3882
3883 return 0;
3884}
3885
3886static int
3887bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
3888{
3889 u32 cmd;
3890 int j;
3891
3892 /* Build the command word. */
3893 cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;
3894
e30372c9
MC
3895 /* Calculate an offset of a buffered flash, not needed for 5709. */
3896 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3897 offset = ((offset / bp->flash_info->page_size) <<
3898 bp->flash_info->page_bits) +
3899 (offset % bp->flash_info->page_size);
3900 }
3901
3902 /* Need to clear DONE bit separately. */
3903 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3904
3905 /* Address of the NVRAM to read from. */
3906 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3907
3908 /* Issue a read command. */
3909 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3910
3911 /* Wait for completion. */
3912 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3913 u32 val;
3914
3915 udelay(5);
3916
3917 val = REG_RD(bp, BNX2_NVM_COMMAND);
3918 if (val & BNX2_NVM_COMMAND_DONE) {
b491edd5
AV
3919 __be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
3920 memcpy(ret_val, &v, 4);
b6016b76
MC
3921 break;
3922 }
3923 }
3924 if (j >= NVRAM_TIMEOUT_COUNT)
3925 return -EBUSY;
3926
3927 return 0;
3928}
3929
3930
3931static int
3932bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
3933{
b491edd5
AV
3934 u32 cmd;
3935 __be32 val32;
b6016b76
MC
3936 int j;
3937
3938 /* Build the command word. */
3939 cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
3940
e30372c9
MC
3941 /* Calculate an offset of a buffered flash, not needed for 5709. */
3942 if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
b6016b76
MC
3943 offset = ((offset / bp->flash_info->page_size) <<
3944 bp->flash_info->page_bits) +
3945 (offset % bp->flash_info->page_size);
3946 }
3947
3948 /* Need to clear DONE bit separately. */
3949 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
3950
3951 memcpy(&val32, val, 4);
b6016b76
MC
3952
3953 /* Write the data. */
b491edd5 3954 REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
b6016b76
MC
3955
3956 /* Address of the NVRAM to write to. */
3957 REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
3958
3959 /* Issue the write command. */
3960 REG_WR(bp, BNX2_NVM_COMMAND, cmd);
3961
3962 /* Wait for completion. */
3963 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
3964 udelay(5);
3965
3966 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
3967 break;
3968 }
3969 if (j >= NVRAM_TIMEOUT_COUNT)
3970 return -EBUSY;
3971
3972 return 0;
3973}
3974
3975static int
3976bnx2_init_nvram(struct bnx2 *bp)
3977{
3978 u32 val;
e30372c9 3979 int j, entry_count, rc = 0;
b6016b76
MC
3980 struct flash_spec *flash;
3981
e30372c9
MC
3982 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
3983 bp->flash_info = &flash_5709;
3984 goto get_flash_size;
3985 }
3986
b6016b76
MC
3987 /* Determine the selected interface. */
3988 val = REG_RD(bp, BNX2_NVM_CFG1);
3989
ff8ac609 3990 entry_count = ARRAY_SIZE(flash_table);
b6016b76 3991
b6016b76
MC
3992 if (val & 0x40000000) {
3993
3994 /* Flash interface has been reconfigured */
3995 for (j = 0, flash = &flash_table[0]; j < entry_count;
37137709
MC
3996 j++, flash++) {
3997 if ((val & FLASH_BACKUP_STRAP_MASK) ==
3998 (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
b6016b76
MC
3999 bp->flash_info = flash;
4000 break;
4001 }
4002 }
4003 }
4004 else {
37137709 4005 u32 mask;
b6016b76
MC
4006 /* Not yet been reconfigured */
4007
37137709
MC
4008 if (val & (1 << 23))
4009 mask = FLASH_BACKUP_STRAP_MASK;
4010 else
4011 mask = FLASH_STRAP_MASK;
4012
b6016b76
MC
4013 for (j = 0, flash = &flash_table[0]; j < entry_count;
4014 j++, flash++) {
4015
37137709 4016 if ((val & mask) == (flash->strapping & mask)) {
b6016b76
MC
4017 bp->flash_info = flash;
4018
4019 /* Request access to the flash interface. */
4020 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4021 return rc;
4022
4023 /* Enable access to flash interface */
4024 bnx2_enable_nvram_access(bp);
4025
4026 /* Reconfigure the flash interface */
4027 REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
4028 REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
4029 REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
4030 REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);
4031
4032 /* Disable access to flash interface */
4033 bnx2_disable_nvram_access(bp);
4034 bnx2_release_nvram_lock(bp);
4035
4036 break;
4037 }
4038 }
4039 } /* if (val & 0x40000000) */
4040
4041 if (j == entry_count) {
4042 bp->flash_info = NULL;
2f23c523 4043 printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
1122db71 4044 return -ENODEV;
b6016b76
MC
4045 }
4046
e30372c9 4047get_flash_size:
2726d6e1 4048 val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
1122db71
MC
4049 val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
4050 if (val)
4051 bp->flash_size = val;
4052 else
4053 bp->flash_size = bp->flash_info->total_size;
4054
b6016b76
MC
4055 return rc;
4056}
4057
4058static int
4059bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4060 int buf_size)
4061{
4062 int rc = 0;
4063 u32 cmd_flags, offset32, len32, extra;
4064
4065 if (buf_size == 0)
4066 return 0;
4067
4068 /* Request access to the flash interface. */
4069 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4070 return rc;
4071
4072 /* Enable access to flash interface */
4073 bnx2_enable_nvram_access(bp);
4074
4075 len32 = buf_size;
4076 offset32 = offset;
4077 extra = 0;
4078
4079 cmd_flags = 0;
4080
4081 if (offset32 & 3) {
4082 u8 buf[4];
4083 u32 pre_len;
4084
4085 offset32 &= ~3;
4086 pre_len = 4 - (offset & 3);
4087
4088 if (pre_len >= len32) {
4089 pre_len = len32;
4090 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4091 BNX2_NVM_COMMAND_LAST;
4092 }
4093 else {
4094 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4095 }
4096
4097 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4098
4099 if (rc)
4100 return rc;
4101
4102 memcpy(ret_buf, buf + (offset & 3), pre_len);
4103
4104 offset32 += 4;
4105 ret_buf += pre_len;
4106 len32 -= pre_len;
4107 }
4108 if (len32 & 3) {
4109 extra = 4 - (len32 & 3);
4110 len32 = (len32 + 4) & ~3;
4111 }
4112
4113 if (len32 == 4) {
4114 u8 buf[4];
4115
4116 if (cmd_flags)
4117 cmd_flags = BNX2_NVM_COMMAND_LAST;
4118 else
4119 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4120 BNX2_NVM_COMMAND_LAST;
4121
4122 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4123
4124 memcpy(ret_buf, buf, 4 - extra);
4125 }
4126 else if (len32 > 0) {
4127 u8 buf[4];
4128
4129 /* Read the first word. */
4130 if (cmd_flags)
4131 cmd_flags = 0;
4132 else
4133 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4134
4135 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4136
4137 /* Advance to the next dword. */
4138 offset32 += 4;
4139 ret_buf += 4;
4140 len32 -= 4;
4141
4142 while (len32 > 4 && rc == 0) {
4143 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4144
4145 /* Advance to the next dword. */
4146 offset32 += 4;
4147 ret_buf += 4;
4148 len32 -= 4;
4149 }
4150
4151 if (rc)
4152 return rc;
4153
4154 cmd_flags = BNX2_NVM_COMMAND_LAST;
4155 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4156
4157 memcpy(ret_buf, buf, 4 - extra);
4158 }
4159
4160 /* Disable access to flash interface */
4161 bnx2_disable_nvram_access(bp);
4162
4163 bnx2_release_nvram_lock(bp);
4164
4165 return rc;
4166}
4167
4168static int
4169bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
4170 int buf_size)
4171{
4172 u32 written, offset32, len32;
e6be763f 4173 u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
b6016b76
MC
4174 int rc = 0;
4175 int align_start, align_end;
4176
4177 buf = data_buf;
4178 offset32 = offset;
4179 len32 = buf_size;
4180 align_start = align_end = 0;
4181
4182 if ((align_start = (offset32 & 3))) {
4183 offset32 &= ~3;
c873879c
MC
4184 len32 += align_start;
4185 if (len32 < 4)
4186 len32 = 4;
b6016b76
MC
4187 if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
4188 return rc;
4189 }
4190
4191 if (len32 & 3) {
c873879c
MC
4192 align_end = 4 - (len32 & 3);
4193 len32 += align_end;
4194 if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
4195 return rc;
b6016b76
MC
4196 }
4197
4198 if (align_start || align_end) {
e6be763f
MC
4199 align_buf = kmalloc(len32, GFP_KERNEL);
4200 if (align_buf == NULL)
b6016b76
MC
4201 return -ENOMEM;
4202 if (align_start) {
e6be763f 4203 memcpy(align_buf, start, 4);
b6016b76
MC
4204 }
4205 if (align_end) {
e6be763f 4206 memcpy(align_buf + len32 - 4, end, 4);
b6016b76 4207 }
e6be763f
MC
4208 memcpy(align_buf + align_start, data_buf, buf_size);
4209 buf = align_buf;
b6016b76
MC
4210 }
4211
e30372c9 4212 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
ae181bc4
MC
4213 flash_buffer = kmalloc(264, GFP_KERNEL);
4214 if (flash_buffer == NULL) {
4215 rc = -ENOMEM;
4216 goto nvram_write_end;
4217 }
4218 }
4219
b6016b76
MC
4220 written = 0;
4221 while ((written < len32) && (rc == 0)) {
4222 u32 page_start, page_end, data_start, data_end;
4223 u32 addr, cmd_flags;
4224 int i;
b6016b76
MC
4225
4226 /* Find the page_start addr */
4227 page_start = offset32 + written;
4228 page_start -= (page_start % bp->flash_info->page_size);
4229 /* Find the page_end addr */
4230 page_end = page_start + bp->flash_info->page_size;
4231 /* Find the data_start addr */
4232 data_start = (written == 0) ? offset32 : page_start;
4233 /* Find the data_end addr */
6aa20a22 4234 data_end = (page_end > offset32 + len32) ?
b6016b76
MC
4235 (offset32 + len32) : page_end;
4236
4237 /* Request access to the flash interface. */
4238 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4239 goto nvram_write_end;
4240
4241 /* Enable access to flash interface */
4242 bnx2_enable_nvram_access(bp);
4243
4244 cmd_flags = BNX2_NVM_COMMAND_FIRST;
e30372c9 4245 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4246 int j;
4247
4248 /* Read the whole page into the buffer
4249 * (non-buffer flash only) */
4250 for (j = 0; j < bp->flash_info->page_size; j += 4) {
4251 if (j == (bp->flash_info->page_size - 4)) {
4252 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4253 }
4254 rc = bnx2_nvram_read_dword(bp,
6aa20a22
JG
4255 page_start + j,
4256 &flash_buffer[j],
b6016b76
MC
4257 cmd_flags);
4258
4259 if (rc)
4260 goto nvram_write_end;
4261
4262 cmd_flags = 0;
4263 }
4264 }
4265
4266 /* Enable writes to flash interface (unlock write-protect) */
4267 if ((rc = bnx2_enable_nvram_write(bp)) != 0)
4268 goto nvram_write_end;
4269
b6016b76
MC
4270 /* Loop to write back the buffer data from page_start to
4271 * data_start */
4272 i = 0;
e30372c9 4273 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
c873879c
MC
4274 /* Erase the page */
4275 if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
4276 goto nvram_write_end;
4277
4278 /* Re-enable the write again for the actual write */
4279 bnx2_enable_nvram_write(bp);
4280
b6016b76
MC
4281 for (addr = page_start; addr < data_start;
4282 addr += 4, i += 4) {
6aa20a22 4283
b6016b76
MC
4284 rc = bnx2_nvram_write_dword(bp, addr,
4285 &flash_buffer[i], cmd_flags);
4286
4287 if (rc != 0)
4288 goto nvram_write_end;
4289
4290 cmd_flags = 0;
4291 }
4292 }
4293
4294 /* Loop to write the new data from data_start to data_end */
bae25761 4295 for (addr = data_start; addr < data_end; addr += 4, i += 4) {
b6016b76 4296 if ((addr == page_end - 4) ||
e30372c9 4297 ((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
b6016b76
MC
4298 (addr == data_end - 4))) {
4299
4300 cmd_flags |= BNX2_NVM_COMMAND_LAST;
4301 }
4302 rc = bnx2_nvram_write_dword(bp, addr, buf,
4303 cmd_flags);
4304
4305 if (rc != 0)
4306 goto nvram_write_end;
4307
4308 cmd_flags = 0;
4309 buf += 4;
4310 }
4311
4312 /* Loop to write back the buffer data from data_end
4313 * to page_end */
e30372c9 4314 if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
b6016b76
MC
4315 for (addr = data_end; addr < page_end;
4316 addr += 4, i += 4) {
6aa20a22 4317
b6016b76
MC
4318 if (addr == page_end-4) {
4319 cmd_flags = BNX2_NVM_COMMAND_LAST;
4320 }
4321 rc = bnx2_nvram_write_dword(bp, addr,
4322 &flash_buffer[i], cmd_flags);
4323
4324 if (rc != 0)
4325 goto nvram_write_end;
4326
4327 cmd_flags = 0;
4328 }
4329 }
4330
4331 /* Disable writes to flash interface (lock write-protect) */
4332 bnx2_disable_nvram_write(bp);
4333
4334 /* Disable access to flash interface */
4335 bnx2_disable_nvram_access(bp);
4336 bnx2_release_nvram_lock(bp);
4337
4338 /* Increment written */
4339 written += data_end - data_start;
4340 }
4341
4342nvram_write_end:
e6be763f
MC
4343 kfree(flash_buffer);
4344 kfree(align_buf);
b6016b76
MC
4345 return rc;
4346}
4347
0d8a6571 4348static void
7c62e83b 4349bnx2_init_fw_cap(struct bnx2 *bp)
0d8a6571 4350{
7c62e83b 4351 u32 val, sig = 0;
0d8a6571 4352
583c28e5 4353 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
7c62e83b
MC
4354 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4355
4356 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4357 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
0d8a6571 4358
2726d6e1 4359 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
0d8a6571
MC
4360 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4361 return;
4362
7c62e83b
MC
4363 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4364 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4365 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4366 }
4367
4368 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4369 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4370 u32 link;
4371
583c28e5 4372 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
0d8a6571 4373
7c62e83b
MC
4374 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4375 if (link & BNX2_LINK_STATUS_SERDES_LINK)
0d8a6571
MC
4376 bp->phy_port = PORT_FIBRE;
4377 else
4378 bp->phy_port = PORT_TP;
489310a4 4379
7c62e83b
MC
4380 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4381 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
0d8a6571 4382 }
7c62e83b
MC
4383
4384 if (netif_running(bp->dev) && sig)
4385 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
0d8a6571
MC
4386}
4387
b4b36042
MC
4388static void
4389bnx2_setup_msix_tbl(struct bnx2 *bp)
4390{
4391 REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);
4392
4393 REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
4394 REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
4395}
4396
b6016b76
MC
4397static int
4398bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
4399{
4400 u32 val;
4401 int i, rc = 0;
489310a4 4402 u8 old_port;
b6016b76
MC
4403
4404 /* Wait for the current PCI transaction to complete before
4405 * issuing a reset. */
4406 REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
4407 BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
4408 BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
4409 BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
4410 BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
4411 val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
4412 udelay(5);
4413
b090ae2b 4414 /* Wait for the firmware to tell us it is ok to issue a reset. */
a2f13890 4415 bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);
b090ae2b 4416
b6016b76
MC
4417 /* Deposit a driver reset signature so the firmware knows that
4418 * this is a soft reset. */
2726d6e1
MC
4419 bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
4420 BNX2_DRV_RESET_SIGNATURE_MAGIC);
b6016b76 4421
b6016b76
MC
4422 /* Do a dummy read to force the chip to complete all current transaction
4423 * before we issue a reset. */
4424 val = REG_RD(bp, BNX2_MISC_ID);
4425
234754d5
MC
4426 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4427 REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
4428 REG_RD(bp, BNX2_MISC_COMMAND);
4429 udelay(5);
b6016b76 4430
234754d5
MC
4431 val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4432 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
b6016b76 4433
234754d5 4434 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);
b6016b76 4435
234754d5
MC
4436 } else {
4437 val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4438 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
4439 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;
4440
4441 /* Chip reset. */
4442 REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);
4443
594a9dfa
MC
4444 /* Reading back any register after chip reset will hang the
4445 * bus on 5706 A0 and A1. The msleep below provides plenty
4446 * of margin for write posting.
4447 */
234754d5 4448 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
8e545881
AV
4449 (CHIP_ID(bp) == CHIP_ID_5706_A1))
4450 msleep(20);
b6016b76 4451
234754d5
MC
4452 /* Reset takes approximate 30 usec */
4453 for (i = 0; i < 10; i++) {
4454 val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
4455 if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4456 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
4457 break;
4458 udelay(10);
4459 }
4460
4461 if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
4462 BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
4463 printk(KERN_ERR PFX "Chip reset did not complete\n");
4464 return -EBUSY;
4465 }
b6016b76
MC
4466 }
4467
4468 /* Make sure byte swapping is properly configured. */
4469 val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
4470 if (val != 0x01020304) {
4471 printk(KERN_ERR PFX "Chip not in correct endian mode\n");
4472 return -ENODEV;
4473 }
4474
b6016b76 4475 /* Wait for the firmware to finish its initialization. */
a2f13890 4476 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
b090ae2b
MC
4477 if (rc)
4478 return rc;
b6016b76 4479
0d8a6571 4480 spin_lock_bh(&bp->phy_lock);
489310a4 4481 old_port = bp->phy_port;
7c62e83b 4482 bnx2_init_fw_cap(bp);
583c28e5
MC
4483 if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
4484 old_port != bp->phy_port)
0d8a6571
MC
4485 bnx2_set_default_remote_link(bp);
4486 spin_unlock_bh(&bp->phy_lock);
4487
b6016b76
MC
4488 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4489 /* Adjust the voltage regular to two steps lower. The default
4490 * of this register is 0x0000000e. */
4491 REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);
4492
4493 /* Remove bad rbuf memory from the free pool. */
4494 rc = bnx2_alloc_bad_rbuf(bp);
4495 }
4496
f86e82fb 4497 if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
4498 bnx2_setup_msix_tbl(bp);
4499
b6016b76
MC
4500 return rc;
4501}
4502
4503static int
4504bnx2_init_chip(struct bnx2 *bp)
4505{
d8026d93 4506 u32 val, mtu;
b4b36042 4507 int rc, i;
b6016b76
MC
4508
4509 /* Make sure the interrupt is not active. */
4510 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
4511
4512 val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
4513 BNX2_DMA_CONFIG_DATA_WORD_SWAP |
4514#ifdef __BIG_ENDIAN
6aa20a22 4515 BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
b6016b76 4516#endif
6aa20a22 4517 BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
b6016b76
MC
4518 DMA_READ_CHANS << 12 |
4519 DMA_WRITE_CHANS << 16;
4520
4521 val |= (0x2 << 20) | (1 << 11);
4522
f86e82fb 4523 if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
b6016b76
MC
4524 val |= (1 << 23);
4525
4526 if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
f86e82fb 4527 (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
b6016b76
MC
4528 val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;
4529
4530 REG_WR(bp, BNX2_DMA_CONFIG, val);
4531
4532 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
4533 val = REG_RD(bp, BNX2_TDMA_CONFIG);
4534 val |= BNX2_TDMA_CONFIG_ONE_DMA;
4535 REG_WR(bp, BNX2_TDMA_CONFIG, val);
4536 }
4537
f86e82fb 4538 if (bp->flags & BNX2_FLAG_PCIX) {
b6016b76
MC
4539 u16 val16;
4540
4541 pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4542 &val16);
4543 pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
4544 val16 & ~PCI_X_CMD_ERO);
4545 }
4546
4547 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
4548 BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
4549 BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
4550 BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);
4551
4552 /* Initialize context mapping and zero out the quick contexts. The
4553 * context block must have already been enabled. */
641bdcd5
MC
4554 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4555 rc = bnx2_init_5709_context(bp);
4556 if (rc)
4557 return rc;
4558 } else
59b47d8a 4559 bnx2_init_context(bp);
b6016b76 4560
fba9fe91
MC
4561 if ((rc = bnx2_init_cpus(bp)) != 0)
4562 return rc;
4563
b6016b76
MC
4564 bnx2_init_nvram(bp);
4565
5fcaed01 4566 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
4567
4568 val = REG_RD(bp, BNX2_MQ_CONFIG);
4569 val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
4570 val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
68c9f75a
MC
4571 if (CHIP_ID(bp) == CHIP_ID_5709_A0 || CHIP_ID(bp) == CHIP_ID_5709_A1)
4572 val |= BNX2_MQ_CONFIG_HALT_DIS;
4573
b6016b76
MC
4574 REG_WR(bp, BNX2_MQ_CONFIG, val);
4575
4576 val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
4577 REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
4578 REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);
4579
4580 val = (BCM_PAGE_BITS - 8) << 24;
4581 REG_WR(bp, BNX2_RV2P_CONFIG, val);
4582
4583 /* Configure page size. */
4584 val = REG_RD(bp, BNX2_TBDR_CONFIG);
4585 val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
4586 val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
4587 REG_WR(bp, BNX2_TBDR_CONFIG, val);
4588
4589 val = bp->mac_addr[0] +
4590 (bp->mac_addr[1] << 8) +
4591 (bp->mac_addr[2] << 16) +
4592 bp->mac_addr[3] +
4593 (bp->mac_addr[4] << 8) +
4594 (bp->mac_addr[5] << 16);
4595 REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);
4596
4597 /* Program the MTU. Also include 4 bytes for CRC32. */
d8026d93
MC
4598 mtu = bp->dev->mtu;
4599 val = mtu + ETH_HLEN + ETH_FCS_LEN;
b6016b76
MC
4600 if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
4601 val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
4602 REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);
4603
d8026d93
MC
4604 if (mtu < 1500)
4605 mtu = 1500;
4606
4607 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
4608 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
4609 bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));
4610
b4b36042
MC
4611 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
4612 bp->bnx2_napi[i].last_status_idx = 0;
4613
efba0180
MC
4614 bp->idle_chk_status_idx = 0xffff;
4615
b6016b76
MC
4616 bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;
4617
4618 /* Set up how to generate a link change interrupt. */
4619 REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
4620
4621 REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
4622 (u64) bp->status_blk_mapping & 0xffffffff);
4623 REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);
4624
4625 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
4626 (u64) bp->stats_blk_mapping & 0xffffffff);
4627 REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
4628 (u64) bp->stats_blk_mapping >> 32);
4629
6aa20a22 4630 REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
b6016b76
MC
4631 (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);
4632
4633 REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
4634 (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);
4635
4636 REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
4637 (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);
4638
4639 REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);
4640
4641 REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);
4642
4643 REG_WR(bp, BNX2_HC_COM_TICKS,
4644 (bp->com_ticks_int << 16) | bp->com_ticks);
4645
4646 REG_WR(bp, BNX2_HC_CMD_TICKS,
4647 (bp->cmd_ticks_int << 16) | bp->cmd_ticks);
4648
02537b06
MC
4649 if (CHIP_NUM(bp) == CHIP_NUM_5708)
4650 REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
4651 else
7ea6920e 4652 REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
b6016b76
MC
4653 REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8); /* 3ms */
4654
4655 if (CHIP_ID(bp) == CHIP_ID_5706_A1)
8e6a72c4 4656 val = BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76 4657 else {
8e6a72c4
MC
4658 val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
4659 BNX2_HC_CONFIG_COLLECT_STATS;
b6016b76
MC
4660 }
4661
5e9ad9e1 4662 if (bp->irq_nvecs > 1) {
c76c0475
MC
4663 REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
4664 BNX2_HC_MSIX_BIT_VECTOR_VAL);
4665
5e9ad9e1
MC
4666 val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
4667 }
4668
4669 if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
4670 val |= BNX2_HC_CONFIG_ONE_SHOT;
4671
4672 REG_WR(bp, BNX2_HC_CONFIG, val);
4673
4674 for (i = 1; i < bp->irq_nvecs; i++) {
4675 u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
4676 BNX2_HC_SB_CONFIG_1;
4677
6f743ca0 4678 REG_WR(bp, base,
c76c0475 4679 BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
5e9ad9e1 4680 BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
c76c0475
MC
4681 BNX2_HC_SB_CONFIG_1_ONE_SHOT);
4682
6f743ca0 4683 REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
c76c0475
MC
4684 (bp->tx_quick_cons_trip_int << 16) |
4685 bp->tx_quick_cons_trip);
4686
6f743ca0 4687 REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
c76c0475
MC
4688 (bp->tx_ticks_int << 16) | bp->tx_ticks);
4689
5e9ad9e1
MC
4690 REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
4691 (bp->rx_quick_cons_trip_int << 16) |
4692 bp->rx_quick_cons_trip);
8e6a72c4 4693
5e9ad9e1
MC
4694 REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
4695 (bp->rx_ticks_int << 16) | bp->rx_ticks);
4696 }
8e6a72c4 4697
b6016b76
MC
4698 /* Clear internal stats counters. */
4699 REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);
4700
da3e4fbe 4701 REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);
b6016b76
MC
4702
4703 /* Initialize the receive filter. */
4704 bnx2_set_rx_mode(bp->dev);
4705
0aa38df7
MC
4706 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4707 val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
4708 val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
4709 REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
4710 }
b090ae2b 4711 rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
a2f13890 4712 1, 0);
b6016b76 4713
df149d70 4714 REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
b6016b76
MC
4715 REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);
4716
4717 udelay(20);
4718
bf5295bb
MC
4719 bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);
4720
b090ae2b 4721 return rc;
b6016b76
MC
4722}
4723
c76c0475
MC
4724static void
4725bnx2_clear_ring_states(struct bnx2 *bp)
4726{
4727 struct bnx2_napi *bnapi;
35e9010b 4728 struct bnx2_tx_ring_info *txr;
bb4f98ab 4729 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
4730 int i;
4731
4732 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4733 bnapi = &bp->bnx2_napi[i];
35e9010b 4734 txr = &bnapi->tx_ring;
bb4f98ab 4735 rxr = &bnapi->rx_ring;
c76c0475 4736
35e9010b
MC
4737 txr->tx_cons = 0;
4738 txr->hw_tx_cons = 0;
bb4f98ab
MC
4739 rxr->rx_prod_bseq = 0;
4740 rxr->rx_prod = 0;
4741 rxr->rx_cons = 0;
4742 rxr->rx_pg_prod = 0;
4743 rxr->rx_pg_cons = 0;
c76c0475
MC
4744 }
4745}
4746
59b47d8a 4747static void
35e9010b 4748bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
59b47d8a
MC
4749{
4750 u32 val, offset0, offset1, offset2, offset3;
62a8313c 4751 u32 cid_addr = GET_CID_ADDR(cid);
59b47d8a
MC
4752
4753 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4754 offset0 = BNX2_L2CTX_TYPE_XI;
4755 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
4756 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
4757 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
4758 } else {
4759 offset0 = BNX2_L2CTX_TYPE;
4760 offset1 = BNX2_L2CTX_CMD_TYPE;
4761 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
4762 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
4763 }
4764 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
62a8313c 4765 bnx2_ctx_wr(bp, cid_addr, offset0, val);
59b47d8a
MC
4766
4767 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
62a8313c 4768 bnx2_ctx_wr(bp, cid_addr, offset1, val);
59b47d8a 4769
35e9010b 4770 val = (u64) txr->tx_desc_mapping >> 32;
62a8313c 4771 bnx2_ctx_wr(bp, cid_addr, offset2, val);
59b47d8a 4772
35e9010b 4773 val = (u64) txr->tx_desc_mapping & 0xffffffff;
62a8313c 4774 bnx2_ctx_wr(bp, cid_addr, offset3, val);
59b47d8a 4775}
b6016b76
MC
4776
4777static void
35e9010b 4778bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
b6016b76
MC
4779{
4780 struct tx_bd *txbd;
c76c0475
MC
4781 u32 cid = TX_CID;
4782 struct bnx2_napi *bnapi;
35e9010b 4783 struct bnx2_tx_ring_info *txr;
c76c0475 4784
35e9010b
MC
4785 bnapi = &bp->bnx2_napi[ring_num];
4786 txr = &bnapi->tx_ring;
4787
4788 if (ring_num == 0)
4789 cid = TX_CID;
4790 else
4791 cid = TX_TSS_CID + ring_num - 1;
b6016b76 4792
2f8af120
MC
4793 bp->tx_wake_thresh = bp->tx_ring_size / 2;
4794
35e9010b 4795 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 4796
35e9010b
MC
4797 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
4798 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
b6016b76 4799
35e9010b
MC
4800 txr->tx_prod = 0;
4801 txr->tx_prod_bseq = 0;
6aa20a22 4802
35e9010b
MC
4803 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
4804 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 4805
35e9010b 4806 bnx2_init_tx_context(bp, cid, txr);
b6016b76
MC
4807}
4808
4809static void
5d5d0015
MC
4810bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
4811 int num_rings)
b6016b76 4812{
b6016b76 4813 int i;
5d5d0015 4814 struct rx_bd *rxbd;
6aa20a22 4815
5d5d0015 4816 for (i = 0; i < num_rings; i++) {
13daffa2 4817 int j;
b6016b76 4818
5d5d0015 4819 rxbd = &rx_ring[i][0];
13daffa2 4820 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 4821 rxbd->rx_bd_len = buf_size;
13daffa2
MC
4822 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
4823 }
5d5d0015 4824 if (i == (num_rings - 1))
13daffa2
MC
4825 j = 0;
4826 else
4827 j = i + 1;
5d5d0015
MC
4828 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
4829 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 4830 }
5d5d0015
MC
4831}
4832
4833static void
bb4f98ab 4834bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
5d5d0015
MC
4835{
4836 int i;
4837 u16 prod, ring_prod;
bb4f98ab
MC
4838 u32 cid, rx_cid_addr, val;
4839 struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
4840 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
4841
4842 if (ring_num == 0)
4843 cid = RX_CID;
4844 else
4845 cid = RX_RSS_CID + ring_num - 1;
4846
4847 rx_cid_addr = GET_CID_ADDR(cid);
5d5d0015 4848
bb4f98ab 4849 bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
5d5d0015
MC
4850 bp->rx_buf_use_size, bp->rx_max_ring);
4851
bb4f98ab 4852 bnx2_init_rx_context(bp, cid);
83e3fc89
MC
4853
4854 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
4855 val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
4856 REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
4857 }
4858
62a8313c 4859 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
47bf4246 4860 if (bp->rx_pg_ring_size) {
bb4f98ab
MC
4861 bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
4862 rxr->rx_pg_desc_mapping,
47bf4246
MC
4863 PAGE_SIZE, bp->rx_max_pg_ring);
4864 val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
62a8313c
MC
4865 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
4866 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
5e9ad9e1 4867 BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);
47bf4246 4868
bb4f98ab 4869 val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
62a8313c 4870 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);
47bf4246 4871
bb4f98ab 4872 val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
62a8313c 4873 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);
47bf4246
MC
4874
4875 if (CHIP_NUM(bp) == CHIP_NUM_5709)
4876 REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
4877 }
b6016b76 4878
bb4f98ab 4879 val = (u64) rxr->rx_desc_mapping[0] >> 32;
62a8313c 4880 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);
b6016b76 4881
bb4f98ab 4882 val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
62a8313c 4883 bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);
b6016b76 4884
bb4f98ab 4885 ring_prod = prod = rxr->rx_pg_prod;
47bf4246 4886 for (i = 0; i < bp->rx_pg_ring_size; i++) {
bb4f98ab 4887 if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
47bf4246
MC
4888 break;
4889 prod = NEXT_RX_BD(prod);
4890 ring_prod = RX_PG_RING_IDX(prod);
4891 }
bb4f98ab 4892 rxr->rx_pg_prod = prod;
47bf4246 4893
bb4f98ab 4894 ring_prod = prod = rxr->rx_prod;
236b6394 4895 for (i = 0; i < bp->rx_ring_size; i++) {
bb4f98ab 4896 if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
b6016b76 4897 break;
b6016b76
MC
4898 prod = NEXT_RX_BD(prod);
4899 ring_prod = RX_RING_IDX(prod);
4900 }
bb4f98ab 4901 rxr->rx_prod = prod;
b6016b76 4902
bb4f98ab
MC
4903 rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
4904 rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
4905 rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;
b6016b76 4906
bb4f98ab
MC
4907 REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
4908 REG_WR16(bp, rxr->rx_bidx_addr, prod);
4909
4910 REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
b6016b76
MC
4911}
4912
35e9010b
MC
4913static void
4914bnx2_init_all_rings(struct bnx2 *bp)
4915{
4916 int i;
5e9ad9e1 4917 u32 val;
35e9010b
MC
4918
4919 bnx2_clear_ring_states(bp);
4920
4921 REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
4922 for (i = 0; i < bp->num_tx_rings; i++)
4923 bnx2_init_tx_ring(bp, i);
4924
4925 if (bp->num_tx_rings > 1)
4926 REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
4927 (TX_TSS_CID << 7));
4928
5e9ad9e1
MC
4929 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
4930 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);
4931
bb4f98ab
MC
4932 for (i = 0; i < bp->num_rx_rings; i++)
4933 bnx2_init_rx_ring(bp, i);
5e9ad9e1
MC
4934
4935 if (bp->num_rx_rings > 1) {
4936 u32 tbl_32;
4937 u8 *tbl = (u8 *) &tbl_32;
4938
4939 bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
4940 BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);
4941
4942 for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
4943 tbl[i % 4] = i % (bp->num_rx_rings - 1);
4944 if ((i % 4) == 3)
4945 bnx2_reg_wr_ind(bp,
4946 BNX2_RXP_SCRATCH_RSS_TBL + i,
4947 cpu_to_be32(tbl_32));
4948 }
4949
4950 val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
4951 BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;
4952
4953 REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);
4954
4955 }
35e9010b
MC
4956}
4957
5d5d0015 4958static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
13daffa2 4959{
5d5d0015 4960 u32 max, num_rings = 1;
13daffa2 4961
5d5d0015
MC
4962 while (ring_size > MAX_RX_DESC_CNT) {
4963 ring_size -= MAX_RX_DESC_CNT;
13daffa2
MC
4964 num_rings++;
4965 }
4966 /* round to next power of 2 */
5d5d0015 4967 max = max_size;
13daffa2
MC
4968 while ((max & num_rings) == 0)
4969 max >>= 1;
4970
4971 if (num_rings != max)
4972 max <<= 1;
4973
5d5d0015
MC
4974 return max;
4975}
4976
4977static void
4978bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
4979{
84eaa187 4980 u32 rx_size, rx_space, jumbo_size;
5d5d0015
MC
4981
4982 /* 8 for CRC and VLAN */
d89cb6af 4983 rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5d5d0015 4984
84eaa187
MC
4985 rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
4986 sizeof(struct skb_shared_info);
4987
601d3d18 4988 bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
47bf4246
MC
4989 bp->rx_pg_ring_size = 0;
4990 bp->rx_max_pg_ring = 0;
4991 bp->rx_max_pg_ring_idx = 0;
f86e82fb 4992 if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
84eaa187
MC
4993 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
4994
4995 jumbo_size = size * pages;
4996 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
4997 jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
4998
4999 bp->rx_pg_ring_size = jumbo_size;
5000 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5001 MAX_RX_PG_RINGS);
5002 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
601d3d18 5003 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
84eaa187
MC
5004 bp->rx_copy_thresh = 0;
5005 }
5d5d0015
MC
5006
5007 bp->rx_buf_use_size = rx_size;
5008 /* hw alignment */
5009 bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
d89cb6af 5010 bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5d5d0015
MC
5011 bp->rx_ring_size = size;
5012 bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
13daffa2
MC
5013 bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5014}
5015
b6016b76
MC
5016static void
5017bnx2_free_tx_skbs(struct bnx2 *bp)
5018{
5019 int i;
5020
35e9010b
MC
5021 for (i = 0; i < bp->num_tx_rings; i++) {
5022 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5023 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5024 int j;
b6016b76 5025
35e9010b 5026 if (txr->tx_buf_ring == NULL)
b6016b76 5027 continue;
b6016b76 5028
35e9010b 5029 for (j = 0; j < TX_DESC_CNT; ) {
3d16af86 5030 struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
35e9010b 5031 struct sk_buff *skb = tx_buf->skb;
35e9010b
MC
5032
5033 if (skb == NULL) {
5034 j++;
5035 continue;
5036 }
5037
3d16af86 5038 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
b6016b76 5039
35e9010b 5040 tx_buf->skb = NULL;
b6016b76 5041
3d16af86 5042 j += skb_shinfo(skb)->nr_frags + 1;
35e9010b 5043 dev_kfree_skb(skb);
b6016b76 5044 }
b6016b76 5045 }
b6016b76
MC
5046}
5047
5048static void
5049bnx2_free_rx_skbs(struct bnx2 *bp)
5050{
5051 int i;
5052
bb4f98ab
MC
5053 for (i = 0; i < bp->num_rx_rings; i++) {
5054 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5055 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5056 int j;
b6016b76 5057
bb4f98ab
MC
5058 if (rxr->rx_buf_ring == NULL)
5059 return;
b6016b76 5060
bb4f98ab
MC
5061 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5062 struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5063 struct sk_buff *skb = rx_buf->skb;
b6016b76 5064
bb4f98ab
MC
5065 if (skb == NULL)
5066 continue;
b6016b76 5067
bb4f98ab
MC
5068 pci_unmap_single(bp->pdev,
5069 pci_unmap_addr(rx_buf, mapping),
5070 bp->rx_buf_use_size,
5071 PCI_DMA_FROMDEVICE);
b6016b76 5072
bb4f98ab
MC
5073 rx_buf->skb = NULL;
5074
5075 dev_kfree_skb(skb);
5076 }
5077 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5078 bnx2_free_rx_page(bp, rxr, j);
b6016b76
MC
5079 }
5080}
5081
5082static void
5083bnx2_free_skbs(struct bnx2 *bp)
5084{
5085 bnx2_free_tx_skbs(bp);
5086 bnx2_free_rx_skbs(bp);
5087}
5088
5089static int
5090bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5091{
5092 int rc;
5093
5094 rc = bnx2_reset_chip(bp, reset_code);
5095 bnx2_free_skbs(bp);
5096 if (rc)
5097 return rc;
5098
fba9fe91
MC
5099 if ((rc = bnx2_init_chip(bp)) != 0)
5100 return rc;
5101
35e9010b 5102 bnx2_init_all_rings(bp);
b6016b76
MC
5103 return 0;
5104}
5105
5106static int
9a120bc5 5107bnx2_init_nic(struct bnx2 *bp, int reset_phy)
b6016b76
MC
5108{
5109 int rc;
5110
5111 if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5112 return rc;
5113
80be4434 5114 spin_lock_bh(&bp->phy_lock);
9a120bc5 5115 bnx2_init_phy(bp, reset_phy);
b6016b76 5116 bnx2_set_link(bp);
543a827d
MC
5117 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5118 bnx2_remote_phy_event(bp);
0d8a6571 5119 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
5120 return 0;
5121}
5122
74bf4ba3
MC
5123static int
5124bnx2_shutdown_chip(struct bnx2 *bp)
5125{
5126 u32 reset_code;
5127
5128 if (bp->flags & BNX2_FLAG_NO_WOL)
5129 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5130 else if (bp->wol)
5131 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5132 else
5133 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5134
5135 return bnx2_reset_chip(bp, reset_code);
5136}
5137
b6016b76
MC
5138static int
5139bnx2_test_registers(struct bnx2 *bp)
5140{
5141 int ret;
5bae30c9 5142 int i, is_5709;
f71e1309 5143 static const struct {
b6016b76
MC
5144 u16 offset;
5145 u16 flags;
5bae30c9 5146#define BNX2_FL_NOT_5709 1
b6016b76
MC
5147 u32 rw_mask;
5148 u32 ro_mask;
5149 } reg_tbl[] = {
5150 { 0x006c, 0, 0x00000000, 0x0000003f },
5151 { 0x0090, 0, 0xffffffff, 0x00000000 },
5152 { 0x0094, 0, 0x00000000, 0x00000000 },
5153
5bae30c9
MC
5154 { 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
5155 { 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5156 { 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5157 { 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
5158 { 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
5159 { 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5160 { 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
5161 { 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5162 { 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5163
5164 { 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5165 { 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
5166 { 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5167 { 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5168 { 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5169 { 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
5170
5171 { 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
5172 { 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
5173 { 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },
b6016b76
MC
5174
5175 { 0x1000, 0, 0x00000000, 0x00000001 },
15b169cc 5176 { 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },
b6016b76
MC
5177
5178 { 0x1408, 0, 0x01c00800, 0x00000000 },
5179 { 0x149c, 0, 0x8000ffff, 0x00000000 },
5180 { 0x14a8, 0, 0x00000000, 0x000001ff },
5b0c76ad 5181 { 0x14ac, 0, 0x0fffffff, 0x10000000 },
b6016b76
MC
5182 { 0x14b0, 0, 0x00000002, 0x00000001 },
5183 { 0x14b8, 0, 0x00000000, 0x00000000 },
5184 { 0x14c0, 0, 0x00000000, 0x00000009 },
5185 { 0x14c4, 0, 0x00003fff, 0x00000000 },
5186 { 0x14cc, 0, 0x00000000, 0x00000001 },
5187 { 0x14d0, 0, 0xffffffff, 0x00000000 },
b6016b76
MC
5188
5189 { 0x1800, 0, 0x00000000, 0x00000001 },
5190 { 0x1804, 0, 0x00000000, 0x00000003 },
b6016b76
MC
5191
5192 { 0x2800, 0, 0x00000000, 0x00000001 },
5193 { 0x2804, 0, 0x00000000, 0x00003f01 },
5194 { 0x2808, 0, 0x0f3f3f03, 0x00000000 },
5195 { 0x2810, 0, 0xffff0000, 0x00000000 },
5196 { 0x2814, 0, 0xffff0000, 0x00000000 },
5197 { 0x2818, 0, 0xffff0000, 0x00000000 },
5198 { 0x281c, 0, 0xffff0000, 0x00000000 },
5199 { 0x2834, 0, 0xffffffff, 0x00000000 },
5200 { 0x2840, 0, 0x00000000, 0xffffffff },
5201 { 0x2844, 0, 0x00000000, 0xffffffff },
5202 { 0x2848, 0, 0xffffffff, 0x00000000 },
5203 { 0x284c, 0, 0xf800f800, 0x07ff07ff },
5204
5205 { 0x2c00, 0, 0x00000000, 0x00000011 },
5206 { 0x2c04, 0, 0x00000000, 0x00030007 },
5207
b6016b76
MC
5208 { 0x3c00, 0, 0x00000000, 0x00000001 },
5209 { 0x3c04, 0, 0x00000000, 0x00070000 },
5210 { 0x3c08, 0, 0x00007f71, 0x07f00000 },
5211 { 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
5212 { 0x3c10, 0, 0xffffffff, 0x00000000 },
5213 { 0x3c14, 0, 0x00000000, 0xffffffff },
5214 { 0x3c18, 0, 0x00000000, 0xffffffff },
5215 { 0x3c1c, 0, 0xfffff000, 0x00000000 },
5216 { 0x3c20, 0, 0xffffff00, 0x00000000 },
b6016b76
MC
5217
5218 { 0x5004, 0, 0x00000000, 0x0000007f },
5219 { 0x5008, 0, 0x0f0007ff, 0x00000000 },
b6016b76 5220
b6016b76
MC
5221 { 0x5c00, 0, 0x00000000, 0x00000001 },
5222 { 0x5c04, 0, 0x00000000, 0x0003000f },
5223 { 0x5c08, 0, 0x00000003, 0x00000000 },
5224 { 0x5c0c, 0, 0x0000fff8, 0x00000000 },
5225 { 0x5c10, 0, 0x00000000, 0xffffffff },
5226 { 0x5c80, 0, 0x00000000, 0x0f7113f1 },
5227 { 0x5c84, 0, 0x00000000, 0x0000f333 },
5228 { 0x5c88, 0, 0x00000000, 0x00077373 },
5229 { 0x5c8c, 0, 0x00000000, 0x0007f737 },
5230
5231 { 0x6808, 0, 0x0000ff7f, 0x00000000 },
5232 { 0x680c, 0, 0xffffffff, 0x00000000 },
5233 { 0x6810, 0, 0xffffffff, 0x00000000 },
5234 { 0x6814, 0, 0xffffffff, 0x00000000 },
5235 { 0x6818, 0, 0xffffffff, 0x00000000 },
5236 { 0x681c, 0, 0xffffffff, 0x00000000 },
5237 { 0x6820, 0, 0x00ff00ff, 0x00000000 },
5238 { 0x6824, 0, 0x00ff00ff, 0x00000000 },
5239 { 0x6828, 0, 0x00ff00ff, 0x00000000 },
5240 { 0x682c, 0, 0x03ff03ff, 0x00000000 },
5241 { 0x6830, 0, 0x03ff03ff, 0x00000000 },
5242 { 0x6834, 0, 0x03ff03ff, 0x00000000 },
5243 { 0x6838, 0, 0x03ff03ff, 0x00000000 },
5244 { 0x683c, 0, 0x0000ffff, 0x00000000 },
5245 { 0x6840, 0, 0x00000ff0, 0x00000000 },
5246 { 0x6844, 0, 0x00ffff00, 0x00000000 },
5247 { 0x684c, 0, 0xffffffff, 0x00000000 },
5248 { 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
5249 { 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
5250 { 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
5251 { 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
5252 { 0x6908, 0, 0x00000000, 0x0001ff0f },
5253 { 0x690c, 0, 0x00000000, 0x0ffe00f0 },
5254
5255 { 0xffff, 0, 0x00000000, 0x00000000 },
5256 };
5257
5258 ret = 0;
5bae30c9
MC
5259 is_5709 = 0;
5260 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5261 is_5709 = 1;
5262
b6016b76
MC
5263 for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
5264 u32 offset, rw_mask, ro_mask, save_val, val;
5bae30c9
MC
5265 u16 flags = reg_tbl[i].flags;
5266
5267 if (is_5709 && (flags & BNX2_FL_NOT_5709))
5268 continue;
b6016b76
MC
5269
5270 offset = (u32) reg_tbl[i].offset;
5271 rw_mask = reg_tbl[i].rw_mask;
5272 ro_mask = reg_tbl[i].ro_mask;
5273
14ab9b86 5274 save_val = readl(bp->regview + offset);
b6016b76 5275
14ab9b86 5276 writel(0, bp->regview + offset);
b6016b76 5277
14ab9b86 5278 val = readl(bp->regview + offset);
b6016b76
MC
5279 if ((val & rw_mask) != 0) {
5280 goto reg_test_err;
5281 }
5282
5283 if ((val & ro_mask) != (save_val & ro_mask)) {
5284 goto reg_test_err;
5285 }
5286
14ab9b86 5287 writel(0xffffffff, bp->regview + offset);
b6016b76 5288
14ab9b86 5289 val = readl(bp->regview + offset);
b6016b76
MC
5290 if ((val & rw_mask) != rw_mask) {
5291 goto reg_test_err;
5292 }
5293
5294 if ((val & ro_mask) != (save_val & ro_mask)) {
5295 goto reg_test_err;
5296 }
5297
14ab9b86 5298 writel(save_val, bp->regview + offset);
b6016b76
MC
5299 continue;
5300
5301reg_test_err:
14ab9b86 5302 writel(save_val, bp->regview + offset);
b6016b76
MC
5303 ret = -ENODEV;
5304 break;
5305 }
5306 return ret;
5307}
5308
5309static int
5310bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5311{
f71e1309 5312 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5313 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5314 int i;
5315
5316 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5317 u32 offset;
5318
5319 for (offset = 0; offset < size; offset += 4) {
5320
2726d6e1 5321 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5322
2726d6e1 5323 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5324 test_pattern[i]) {
5325 return -ENODEV;
5326 }
5327 }
5328 }
5329 return 0;
5330}
5331
5332static int
5333bnx2_test_memory(struct bnx2 *bp)
5334{
5335 int ret = 0;
5336 int i;
5bae30c9 5337 static struct mem_entry {
b6016b76
MC
5338 u32 offset;
5339 u32 len;
5bae30c9 5340 } mem_tbl_5706[] = {
b6016b76 5341 { 0x60000, 0x4000 },
5b0c76ad 5342 { 0xa0000, 0x3000 },
b6016b76
MC
5343 { 0xe0000, 0x4000 },
5344 { 0x120000, 0x4000 },
5345 { 0x1a0000, 0x4000 },
5346 { 0x160000, 0x4000 },
5347 { 0xffffffff, 0 },
5bae30c9
MC
5348 },
5349 mem_tbl_5709[] = {
5350 { 0x60000, 0x4000 },
5351 { 0xa0000, 0x3000 },
5352 { 0xe0000, 0x4000 },
5353 { 0x120000, 0x4000 },
5354 { 0x1a0000, 0x4000 },
5355 { 0xffffffff, 0 },
b6016b76 5356 };
5bae30c9
MC
5357 struct mem_entry *mem_tbl;
5358
5359 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5360 mem_tbl = mem_tbl_5709;
5361 else
5362 mem_tbl = mem_tbl_5706;
b6016b76
MC
5363
5364 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5365 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5366 mem_tbl[i].len)) != 0) {
5367 return ret;
5368 }
5369 }
6aa20a22 5370
b6016b76
MC
5371 return ret;
5372}
5373
bc5a0690
MC
5374#define BNX2_MAC_LOOPBACK 0
5375#define BNX2_PHY_LOOPBACK 1
5376
b6016b76 5377static int
bc5a0690 5378bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
b6016b76
MC
5379{
5380 unsigned int pkt_size, num_pkts, i;
5381 struct sk_buff *skb, *rx_skb;
5382 unsigned char *packet;
bc5a0690 5383 u16 rx_start_idx, rx_idx;
b6016b76
MC
5384 dma_addr_t map;
5385 struct tx_bd *txbd;
5386 struct sw_bd *rx_buf;
5387 struct l2_fhdr *rx_hdr;
5388 int ret = -ENODEV;
c76c0475 5389 struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
35e9010b 5390 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
bb4f98ab 5391 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
c76c0475
MC
5392
5393 tx_napi = bnapi;
b6016b76 5394
35e9010b 5395 txr = &tx_napi->tx_ring;
bb4f98ab 5396 rxr = &bnapi->rx_ring;
bc5a0690
MC
5397 if (loopback_mode == BNX2_MAC_LOOPBACK) {
5398 bp->loopback = MAC_LOOPBACK;
5399 bnx2_set_mac_loopback(bp);
5400 }
5401 else if (loopback_mode == BNX2_PHY_LOOPBACK) {
583c28e5 5402 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
489310a4
MC
5403 return 0;
5404
80be4434 5405 bp->loopback = PHY_LOOPBACK;
bc5a0690
MC
5406 bnx2_set_phy_loopback(bp);
5407 }
5408 else
5409 return -EINVAL;
b6016b76 5410
84eaa187 5411 pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
932f3772 5412 skb = netdev_alloc_skb(bp->dev, pkt_size);
b6cbc3b6
JL
5413 if (!skb)
5414 return -ENOMEM;
b6016b76 5415 packet = skb_put(skb, pkt_size);
6634292b 5416 memcpy(packet, bp->dev->dev_addr, 6);
b6016b76
MC
5417 memset(packet + 6, 0x0, 8);
5418 for (i = 14; i < pkt_size; i++)
5419 packet[i] = (unsigned char) (i & 0xff);
5420
3d16af86
BL
5421 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
5422 dev_kfree_skb(skb);
5423 return -EIO;
5424 }
5425 map = skb_shinfo(skb)->dma_maps[0];
b6016b76 5426
bf5295bb
MC
5427 REG_WR(bp, BNX2_HC_COMMAND,
5428 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5429
b6016b76
MC
5430 REG_RD(bp, BNX2_HC_COMMAND);
5431
5432 udelay(5);
35efa7c1 5433 rx_start_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76 5434
b6016b76
MC
5435 num_pkts = 0;
5436
35e9010b 5437 txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];
b6016b76
MC
5438
5439 txbd->tx_bd_haddr_hi = (u64) map >> 32;
5440 txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
5441 txbd->tx_bd_mss_nbytes = pkt_size;
5442 txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;
5443
5444 num_pkts++;
35e9010b
MC
5445 txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
5446 txr->tx_prod_bseq += pkt_size;
b6016b76 5447
35e9010b
MC
5448 REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
5449 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
5450
5451 udelay(100);
5452
bf5295bb
MC
5453 REG_WR(bp, BNX2_HC_COMMAND,
5454 bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
5455
b6016b76
MC
5456 REG_RD(bp, BNX2_HC_COMMAND);
5457
5458 udelay(5);
5459
3d16af86 5460 skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
745720e5 5461 dev_kfree_skb(skb);
b6016b76 5462
35e9010b 5463 if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
b6016b76 5464 goto loopback_test_done;
b6016b76 5465
35efa7c1 5466 rx_idx = bnx2_get_hw_rx_cons(bnapi);
b6016b76
MC
5467 if (rx_idx != rx_start_idx + num_pkts) {
5468 goto loopback_test_done;
5469 }
5470
bb4f98ab 5471 rx_buf = &rxr->rx_buf_ring[rx_start_idx];
b6016b76
MC
5472 rx_skb = rx_buf->skb;
5473
5474 rx_hdr = (struct l2_fhdr *) rx_skb->data;
d89cb6af 5475 skb_reserve(rx_skb, BNX2_RX_OFFSET);
b6016b76
MC
5476
5477 pci_dma_sync_single_for_cpu(bp->pdev,
5478 pci_unmap_addr(rx_buf, mapping),
5479 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
5480
ade2bfe7 5481 if (rx_hdr->l2_fhdr_status &
b6016b76
MC
5482 (L2_FHDR_ERRORS_BAD_CRC |
5483 L2_FHDR_ERRORS_PHY_DECODE |
5484 L2_FHDR_ERRORS_ALIGNMENT |
5485 L2_FHDR_ERRORS_TOO_SHORT |
5486 L2_FHDR_ERRORS_GIANT_FRAME)) {
5487
5488 goto loopback_test_done;
5489 }
5490
5491 if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
5492 goto loopback_test_done;
5493 }
5494
5495 for (i = 14; i < pkt_size; i++) {
5496 if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
5497 goto loopback_test_done;
5498 }
5499 }
5500
5501 ret = 0;
5502
5503loopback_test_done:
5504 bp->loopback = 0;
5505 return ret;
5506}
5507
bc5a0690
MC
5508#define BNX2_MAC_LOOPBACK_FAILED 1
5509#define BNX2_PHY_LOOPBACK_FAILED 2
5510#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5511 BNX2_PHY_LOOPBACK_FAILED)
5512
5513static int
5514bnx2_test_loopback(struct bnx2 *bp)
5515{
5516 int rc = 0;
5517
5518 if (!netif_running(bp->dev))
5519 return BNX2_LOOPBACK_FAILED;
5520
5521 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5522 spin_lock_bh(&bp->phy_lock);
9a120bc5 5523 bnx2_init_phy(bp, 1);
bc5a0690
MC
5524 spin_unlock_bh(&bp->phy_lock);
5525 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5526 rc |= BNX2_MAC_LOOPBACK_FAILED;
5527 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5528 rc |= BNX2_PHY_LOOPBACK_FAILED;
5529 return rc;
5530}
5531
b6016b76
MC
5532#define NVRAM_SIZE 0x200
5533#define CRC32_RESIDUAL 0xdebb20e3
5534
5535static int
5536bnx2_test_nvram(struct bnx2 *bp)
5537{
b491edd5 5538 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5539 u8 *data = (u8 *) buf;
5540 int rc = 0;
5541 u32 magic, csum;
5542
5543 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5544 goto test_nvram_done;
5545
5546 magic = be32_to_cpu(buf[0]);
5547 if (magic != 0x669955aa) {
5548 rc = -ENODEV;
5549 goto test_nvram_done;
5550 }
5551
5552 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5553 goto test_nvram_done;
5554
5555 csum = ether_crc_le(0x100, data);
5556 if (csum != CRC32_RESIDUAL) {
5557 rc = -ENODEV;
5558 goto test_nvram_done;
5559 }
5560
5561 csum = ether_crc_le(0x100, data + 0x100);
5562 if (csum != CRC32_RESIDUAL) {
5563 rc = -ENODEV;
5564 }
5565
5566test_nvram_done:
5567 return rc;
5568}
5569
5570static int
5571bnx2_test_link(struct bnx2 *bp)
5572{
5573 u32 bmsr;
5574
9f52b564
MC
5575 if (!netif_running(bp->dev))
5576 return -ENODEV;
5577
583c28e5 5578 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
489310a4
MC
5579 if (bp->link_up)
5580 return 0;
5581 return -ENODEV;
5582 }
c770a65c 5583 spin_lock_bh(&bp->phy_lock);
27a005b8
MC
5584 bnx2_enable_bmsr1(bp);
5585 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5586 bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
5587 bnx2_disable_bmsr1(bp);
c770a65c 5588 spin_unlock_bh(&bp->phy_lock);
6aa20a22 5589
b6016b76
MC
5590 if (bmsr & BMSR_LSTATUS) {
5591 return 0;
5592 }
5593 return -ENODEV;
5594}
5595
5596static int
5597bnx2_test_intr(struct bnx2 *bp)
5598{
5599 int i;
b6016b76
MC
5600 u16 status_idx;
5601
5602 if (!netif_running(bp->dev))
5603 return -ENODEV;
5604
5605 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5606
5607 /* This register is not touched during run-time. */
bf5295bb 5608 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5609 REG_RD(bp, BNX2_HC_COMMAND);
5610
5611 for (i = 0; i < 10; i++) {
5612 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5613 status_idx) {
5614
5615 break;
5616 }
5617
5618 msleep_interruptible(10);
5619 }
5620 if (i < 10)
5621 return 0;
5622
5623 return -ENODEV;
5624}
5625
38ea3686 5626/* Determining link for parallel detection. */
b2fadeae
MC
5627static int
5628bnx2_5706_serdes_has_link(struct bnx2 *bp)
5629{
5630 u32 mode_ctl, an_dbg, exp;
5631
38ea3686
MC
5632 if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
5633 return 0;
5634
b2fadeae
MC
5635 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
5636 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);
5637
5638 if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
5639 return 0;
5640
5641 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5642 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5643 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
5644
f3014c0c 5645 if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
b2fadeae
MC
5646 return 0;
5647
5648 bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
5649 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5650 bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
5651
5652 if (exp & MII_EXPAND_REG1_RUDI_C) /* receiving CONFIG */
5653 return 0;
5654
5655 return 1;
5656}
5657
b6016b76 5658static void
48b01e2d 5659bnx2_5706_serdes_timer(struct bnx2 *bp)
b6016b76 5660{
b2fadeae
MC
5661 int check_link = 1;
5662
48b01e2d 5663 spin_lock(&bp->phy_lock);
b2fadeae 5664 if (bp->serdes_an_pending) {
48b01e2d 5665 bp->serdes_an_pending--;
b2fadeae
MC
5666 check_link = 0;
5667 } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
48b01e2d 5668 u32 bmcr;
b6016b76 5669
ac392abc 5670 bp->current_interval = BNX2_TIMER_INTERVAL;
cd339a0e 5671
ca58c3af 5672 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 5673
48b01e2d 5674 if (bmcr & BMCR_ANENABLE) {
b2fadeae 5675 if (bnx2_5706_serdes_has_link(bp)) {
48b01e2d
MC
5676 bmcr &= ~BMCR_ANENABLE;
5677 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
ca58c3af 5678 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
583c28e5 5679 bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d 5680 }
b6016b76 5681 }
48b01e2d
MC
5682 }
5683 else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
583c28e5 5684 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
48b01e2d 5685 u32 phy2;
b6016b76 5686
48b01e2d
MC
5687 bnx2_write_phy(bp, 0x17, 0x0f01);
5688 bnx2_read_phy(bp, 0x15, &phy2);
5689 if (phy2 & 0x20) {
5690 u32 bmcr;
cd339a0e 5691
ca58c3af 5692 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
48b01e2d 5693 bmcr |= BMCR_ANENABLE;
ca58c3af 5694 bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
b6016b76 5695
583c28e5 5696 bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
48b01e2d
MC
5697 }
5698 } else
ac392abc 5699 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5700
a2724e25 5701 if (check_link) {
b2fadeae
MC
5702 u32 val;
5703
5704 bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
5705 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5706 bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
5707
a2724e25
MC
5708 if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
5709 if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
5710 bnx2_5706s_force_link_dn(bp, 1);
5711 bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
5712 } else
5713 bnx2_set_link(bp);
5714 } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
5715 bnx2_set_link(bp);
b2fadeae 5716 }
48b01e2d
MC
5717 spin_unlock(&bp->phy_lock);
5718}
b6016b76 5719
f8dd064e
MC
5720static void
5721bnx2_5708_serdes_timer(struct bnx2 *bp)
5722{
583c28e5 5723 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
5724 return;
5725
583c28e5 5726 if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
f8dd064e
MC
5727 bp->serdes_an_pending = 0;
5728 return;
5729 }
b6016b76 5730
f8dd064e
MC
5731 spin_lock(&bp->phy_lock);
5732 if (bp->serdes_an_pending)
5733 bp->serdes_an_pending--;
5734 else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
5735 u32 bmcr;
b6016b76 5736
ca58c3af 5737 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
f8dd064e 5738 if (bmcr & BMCR_ANENABLE) {
605a9e20 5739 bnx2_enable_forced_2g5(bp);
40105c0b 5740 bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
f8dd064e 5741 } else {
605a9e20 5742 bnx2_disable_forced_2g5(bp);
f8dd064e 5743 bp->serdes_an_pending = 2;
ac392abc 5744 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5745 }
b6016b76 5746
f8dd064e 5747 } else
ac392abc 5748 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 5749
f8dd064e
MC
5750 spin_unlock(&bp->phy_lock);
5751}
5752
48b01e2d
MC
5753static void
5754bnx2_timer(unsigned long data)
5755{
5756 struct bnx2 *bp = (struct bnx2 *) data;
b6016b76 5757
48b01e2d
MC
5758 if (!netif_running(bp->dev))
5759 return;
b6016b76 5760
48b01e2d
MC
5761 if (atomic_read(&bp->intr_sem) != 0)
5762 goto bnx2_restart_timer;
b6016b76 5763
efba0180
MC
5764 if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
5765 BNX2_FLAG_USING_MSI)
5766 bnx2_chk_missed_msi(bp);
5767
df149d70 5768 bnx2_send_heart_beat(bp);
b6016b76 5769
2726d6e1
MC
5770 bp->stats_blk->stat_FwRxDrop =
5771 bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);
b6016b76 5772
02537b06
MC
5773 /* workaround occasional corrupted counters */
5774 if (CHIP_NUM(bp) == CHIP_NUM_5708 && bp->stats_ticks)
5775 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
5776 BNX2_HC_COMMAND_STATS_NOW);
5777
583c28e5 5778 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
f8dd064e
MC
5779 if (CHIP_NUM(bp) == CHIP_NUM_5706)
5780 bnx2_5706_serdes_timer(bp);
27a005b8 5781 else
f8dd064e 5782 bnx2_5708_serdes_timer(bp);
b6016b76
MC
5783 }
5784
5785bnx2_restart_timer:
cd339a0e 5786 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5787}
5788
8e6a72c4
MC
5789static int
5790bnx2_request_irq(struct bnx2 *bp)
5791{
6d866ffc 5792 unsigned long flags;
b4b36042
MC
5793 struct bnx2_irq *irq;
5794 int rc = 0, i;
8e6a72c4 5795
f86e82fb 5796 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
5797 flags = 0;
5798 else
5799 flags = IRQF_SHARED;
b4b36042
MC
5800
5801 for (i = 0; i < bp->irq_nvecs; i++) {
5802 irq = &bp->irq_tbl[i];
c76c0475 5803 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 5804 &bp->bnx2_napi[i]);
b4b36042
MC
5805 if (rc)
5806 break;
5807 irq->requested = 1;
5808 }
8e6a72c4
MC
5809 return rc;
5810}
5811
5812static void
5813bnx2_free_irq(struct bnx2 *bp)
5814{
b4b36042
MC
5815 struct bnx2_irq *irq;
5816 int i;
8e6a72c4 5817
b4b36042
MC
5818 for (i = 0; i < bp->irq_nvecs; i++) {
5819 irq = &bp->irq_tbl[i];
5820 if (irq->requested)
f0ea2e63 5821 free_irq(irq->vector, &bp->bnx2_napi[i]);
b4b36042 5822 irq->requested = 0;
6d866ffc 5823 }
f86e82fb 5824 if (bp->flags & BNX2_FLAG_USING_MSI)
b4b36042 5825 pci_disable_msi(bp->pdev);
f86e82fb 5826 else if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
5827 pci_disable_msix(bp->pdev);
5828
f86e82fb 5829 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
b4b36042
MC
5830}
5831
5832static void
5e9ad9e1 5833bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
b4b36042 5834{
57851d84
MC
5835 int i, rc;
5836 struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
4e1d0de9
MC
5837 struct net_device *dev = bp->dev;
5838 const int len = sizeof(bp->irq_tbl[0].name);
57851d84 5839
b4b36042
MC
5840 bnx2_setup_msix_tbl(bp);
5841 REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
5842 REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
5843 REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);
57851d84
MC
5844
5845 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5846 msix_ent[i].entry = i;
5847 msix_ent[i].vector = 0;
35e9010b 5848
4e1d0de9 5849 snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
f0ea2e63 5850 bp->irq_tbl[i].handler = bnx2_msi_1shot;
57851d84
MC
5851 }
5852
5853 rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
5854 if (rc != 0)
5855 return;
5856
5e9ad9e1 5857 bp->irq_nvecs = msix_vecs;
f86e82fb 5858 bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
57851d84
MC
5859 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
5860 bp->irq_tbl[i].vector = msix_ent[i].vector;
6d866ffc
MC
5861}
5862
5863static void
5864bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
5865{
5e9ad9e1 5866 int cpus = num_online_cpus();
706bf240 5867 int msix_vecs = min(cpus + 1, RX_MAX_RINGS);
5e9ad9e1 5868
6d866ffc
MC
5869 bp->irq_tbl[0].handler = bnx2_interrupt;
5870 strcpy(bp->irq_tbl[0].name, bp->dev->name);
b4b36042
MC
5871 bp->irq_nvecs = 1;
5872 bp->irq_tbl[0].vector = bp->pdev->irq;
5873
5e9ad9e1
MC
5874 if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
5875 bnx2_enable_msix(bp, msix_vecs);
6d866ffc 5876
f86e82fb
DM
5877 if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
5878 !(bp->flags & BNX2_FLAG_USING_MSIX)) {
6d866ffc 5879 if (pci_enable_msi(bp->pdev) == 0) {
f86e82fb 5880 bp->flags |= BNX2_FLAG_USING_MSI;
6d866ffc 5881 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
f86e82fb 5882 bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
6d866ffc
MC
5883 bp->irq_tbl[0].handler = bnx2_msi_1shot;
5884 } else
5885 bp->irq_tbl[0].handler = bnx2_msi;
b4b36042
MC
5886
5887 bp->irq_tbl[0].vector = bp->pdev->irq;
6d866ffc
MC
5888 }
5889 }
706bf240
BL
5890
5891 bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
5892 bp->dev->real_num_tx_queues = bp->num_tx_rings;
5893
5e9ad9e1 5894 bp->num_rx_rings = bp->irq_nvecs;
8e6a72c4
MC
5895}
5896
b6016b76
MC
5897/* Called with rtnl_lock */
5898static int
5899bnx2_open(struct net_device *dev)
5900{
972ec0d4 5901 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5902 int rc;
5903
1b2f922f
MC
5904 netif_carrier_off(dev);
5905
829ca9a3 5906 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
5907 bnx2_disable_int(bp);
5908
35e9010b
MC
5909 bnx2_setup_int_mode(bp, disable_msi);
5910 bnx2_napi_enable(bp);
b6016b76 5911 rc = bnx2_alloc_mem(bp);
2739a8bb
MC
5912 if (rc)
5913 goto open_err;
b6016b76 5914
8e6a72c4 5915 rc = bnx2_request_irq(bp);
2739a8bb
MC
5916 if (rc)
5917 goto open_err;
b6016b76 5918
9a120bc5 5919 rc = bnx2_init_nic(bp, 1);
2739a8bb
MC
5920 if (rc)
5921 goto open_err;
6aa20a22 5922
cd339a0e 5923 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
5924
5925 atomic_set(&bp->intr_sem, 0);
5926
5927 bnx2_enable_int(bp);
5928
f86e82fb 5929 if (bp->flags & BNX2_FLAG_USING_MSI) {
b6016b76
MC
5930 /* Test MSI to make sure it is working
5931 * If MSI test fails, go back to INTx mode
5932 */
5933 if (bnx2_test_intr(bp) != 0) {
5934 printk(KERN_WARNING PFX "%s: No interrupt was generated"
5935 " using MSI, switching to INTx mode. Please"
5936 " report this failure to the PCI maintainer"
5937 " and include system chipset information.\n",
5938 bp->dev->name);
5939
5940 bnx2_disable_int(bp);
8e6a72c4 5941 bnx2_free_irq(bp);
b6016b76 5942
6d866ffc
MC
5943 bnx2_setup_int_mode(bp, 1);
5944
9a120bc5 5945 rc = bnx2_init_nic(bp, 0);
b6016b76 5946
8e6a72c4
MC
5947 if (!rc)
5948 rc = bnx2_request_irq(bp);
5949
b6016b76 5950 if (rc) {
b6016b76 5951 del_timer_sync(&bp->timer);
2739a8bb 5952 goto open_err;
b6016b76
MC
5953 }
5954 bnx2_enable_int(bp);
5955 }
5956 }
f86e82fb 5957 if (bp->flags & BNX2_FLAG_USING_MSI)
b6016b76 5958 printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
f86e82fb 5959 else if (bp->flags & BNX2_FLAG_USING_MSIX)
57851d84 5960 printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);
b6016b76 5961
706bf240 5962 netif_tx_start_all_queues(dev);
b6016b76
MC
5963
5964 return 0;
2739a8bb
MC
5965
5966open_err:
5967 bnx2_napi_disable(bp);
5968 bnx2_free_skbs(bp);
5969 bnx2_free_irq(bp);
5970 bnx2_free_mem(bp);
5971 return rc;
b6016b76
MC
5972}
5973
5974static void
c4028958 5975bnx2_reset_task(struct work_struct *work)
b6016b76 5976{
c4028958 5977 struct bnx2 *bp = container_of(work, struct bnx2, reset_task);
b6016b76 5978
afdc08b9
MC
5979 if (!netif_running(bp->dev))
5980 return;
5981
b6016b76
MC
5982 bnx2_netif_stop(bp);
5983
9a120bc5 5984 bnx2_init_nic(bp, 1);
b6016b76
MC
5985
5986 atomic_set(&bp->intr_sem, 1);
5987 bnx2_netif_start(bp);
5988}
5989
5990static void
5991bnx2_tx_timeout(struct net_device *dev)
5992{
972ec0d4 5993 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
5994
5995 /* This allows the netif to be shutdown gracefully before resetting */
5996 schedule_work(&bp->reset_task);
5997}
5998
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* Register (or unregister, vlgrp == NULL) the VLAN group.  The netif is
 * quiesced around the RX-mode update, and firmware is told to keep VLAN
 * tags when the chip supports it.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_netif_stop(bp);

	bp->vlgrp = vlgrp;
	bnx2_set_rx_mode(dev);
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp);
}
#endif
6016
932ff279 6017/* Called with netif_tx_lock.
2f8af120
MC
6018 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
6019 * netif_wake_queue().
b6016b76
MC
6020 */
6021static int
6022bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
6023{
972ec0d4 6024 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6025 dma_addr_t mapping;
6026 struct tx_bd *txbd;
3d16af86 6027 struct sw_tx_bd *tx_buf;
b6016b76
MC
6028 u32 len, vlan_tag_flags, last_frag, mss;
6029 u16 prod, ring_prod;
6030 int i;
706bf240
BL
6031 struct bnx2_napi *bnapi;
6032 struct bnx2_tx_ring_info *txr;
6033 struct netdev_queue *txq;
3d16af86 6034 struct skb_shared_info *sp;
706bf240
BL
6035
6036 /* Determine which tx ring we will be placed on */
6037 i = skb_get_queue_mapping(skb);
6038 bnapi = &bp->bnx2_napi[i];
6039 txr = &bnapi->tx_ring;
6040 txq = netdev_get_tx_queue(dev, i);
b6016b76 6041
35e9010b 6042 if (unlikely(bnx2_tx_avail(bp, txr) <
a550c99b 6043 (skb_shinfo(skb)->nr_frags + 1))) {
706bf240 6044 netif_tx_stop_queue(txq);
b6016b76
MC
6045 printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
6046 dev->name);
6047
6048 return NETDEV_TX_BUSY;
6049 }
6050 len = skb_headlen(skb);
35e9010b 6051 prod = txr->tx_prod;
b6016b76
MC
6052 ring_prod = TX_RING_IDX(prod);
6053
6054 vlan_tag_flags = 0;
84fa7933 6055 if (skb->ip_summed == CHECKSUM_PARTIAL) {
b6016b76
MC
6056 vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
6057 }
6058
729b85cd 6059#ifdef BCM_VLAN
79ea13ce 6060 if (bp->vlgrp && vlan_tx_tag_present(skb)) {
b6016b76
MC
6061 vlan_tag_flags |=
6062 (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
6063 }
729b85cd 6064#endif
fde82055 6065 if ((mss = skb_shinfo(skb)->gso_size)) {
a1efb4b6 6066 u32 tcp_opt_len;
eddc9ec5 6067 struct iphdr *iph;
b6016b76 6068
b6016b76
MC
6069 vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;
6070
4666f87a
MC
6071 tcp_opt_len = tcp_optlen(skb);
6072
6073 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
6074 u32 tcp_off = skb_transport_offset(skb) -
6075 sizeof(struct ipv6hdr) - ETH_HLEN;
ab6a5bb6 6076
4666f87a
MC
6077 vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
6078 TX_BD_FLAGS_SW_FLAGS;
6079 if (likely(tcp_off == 0))
6080 vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
6081 else {
6082 tcp_off >>= 3;
6083 vlan_tag_flags |= ((tcp_off & 0x3) <<
6084 TX_BD_FLAGS_TCP6_OFF0_SHL) |
6085 ((tcp_off & 0x10) <<
6086 TX_BD_FLAGS_TCP6_OFF4_SHL);
6087 mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
6088 }
6089 } else {
4666f87a 6090 iph = ip_hdr(skb);
4666f87a
MC
6091 if (tcp_opt_len || (iph->ihl > 5)) {
6092 vlan_tag_flags |= ((iph->ihl - 5) +
6093 (tcp_opt_len >> 2)) << 8;
6094 }
b6016b76 6095 }
4666f87a 6096 } else
b6016b76 6097 mss = 0;
b6016b76 6098
3d16af86
BL
6099 if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
6100 dev_kfree_skb(skb);
6101 return NETDEV_TX_OK;
6102 }
6103
6104 sp = skb_shinfo(skb);
6105 mapping = sp->dma_maps[0];
6aa20a22 6106
35e9010b 6107 tx_buf = &txr->tx_buf_ring[ring_prod];
b6016b76 6108 tx_buf->skb = skb;
b6016b76 6109
35e9010b 6110 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6111
6112 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6113 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6114 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6115 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;
6116
6117 last_frag = skb_shinfo(skb)->nr_frags;
6118
6119 for (i = 0; i < last_frag; i++) {
6120 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6121
6122 prod = NEXT_TX_BD(prod);
6123 ring_prod = TX_RING_IDX(prod);
35e9010b 6124 txbd = &txr->tx_desc_ring[ring_prod];
b6016b76
MC
6125
6126 len = frag->size;
3d16af86 6127 mapping = sp->dma_maps[i + 1];
b6016b76
MC
6128
6129 txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
6130 txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
6131 txbd->tx_bd_mss_nbytes = len | (mss << 16);
6132 txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;
6133
6134 }
6135 txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;
6136
6137 prod = NEXT_TX_BD(prod);
35e9010b 6138 txr->tx_prod_bseq += skb->len;
b6016b76 6139
35e9010b
MC
6140 REG_WR16(bp, txr->tx_bidx_addr, prod);
6141 REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);
b6016b76
MC
6142
6143 mmiowb();
6144
35e9010b 6145 txr->tx_prod = prod;
b6016b76
MC
6146 dev->trans_start = jiffies;
6147
35e9010b 6148 if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
706bf240 6149 netif_tx_stop_queue(txq);
35e9010b 6150 if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
706bf240 6151 netif_tx_wake_queue(txq);
b6016b76
MC
6152 }
6153
6154 return NETDEV_TX_OK;
6155}
6156
6157/* Called with rtnl_lock */
6158static int
6159bnx2_close(struct net_device *dev)
6160{
972ec0d4 6161 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6162
4bb073c0 6163 cancel_work_sync(&bp->reset_task);
afdc08b9 6164
bea3348e 6165 bnx2_disable_int_sync(bp);
35efa7c1 6166 bnx2_napi_disable(bp);
b6016b76 6167 del_timer_sync(&bp->timer);
74bf4ba3 6168 bnx2_shutdown_chip(bp);
8e6a72c4 6169 bnx2_free_irq(bp);
b6016b76
MC
6170 bnx2_free_skbs(bp);
6171 bnx2_free_mem(bp);
6172 bp->link_up = 0;
6173 netif_carrier_off(bp->dev);
829ca9a3 6174 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
6175 return 0;
6176}
6177
6178#define GET_NET_STATS64(ctr) \
6179 (unsigned long) ((unsigned long) (ctr##_hi) << 32) + \
6180 (unsigned long) (ctr##_lo)
6181
6182#define GET_NET_STATS32(ctr) \
6183 (ctr##_lo)
6184
6185#if (BITS_PER_LONG == 64)
6186#define GET_NET_STATS GET_NET_STATS64
6187#else
6188#define GET_NET_STATS GET_NET_STATS32
6189#endif
6190
6191static struct net_device_stats *
6192bnx2_get_stats(struct net_device *dev)
6193{
972ec0d4 6194 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6195 struct statistics_block *stats_blk = bp->stats_blk;
d8e8034d 6196 struct net_device_stats *net_stats = &dev->stats;
b6016b76
MC
6197
6198 if (bp->stats_blk == NULL) {
6199 return net_stats;
6200 }
6201 net_stats->rx_packets =
6202 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6203 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6204 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6205
6206 net_stats->tx_packets =
6207 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6208 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6209 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6210
6211 net_stats->rx_bytes =
6212 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6213
6214 net_stats->tx_bytes =
6215 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6216
6aa20a22 6217 net_stats->multicast =
b6016b76
MC
6218 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6219
6aa20a22 6220 net_stats->collisions =
b6016b76
MC
6221 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6222
6aa20a22 6223 net_stats->rx_length_errors =
b6016b76
MC
6224 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6225 stats_blk->stat_EtherStatsOverrsizePkts);
6226
6aa20a22 6227 net_stats->rx_over_errors =
b6016b76
MC
6228 (unsigned long) stats_blk->stat_IfInMBUFDiscards;
6229
6aa20a22 6230 net_stats->rx_frame_errors =
b6016b76
MC
6231 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6232
6aa20a22 6233 net_stats->rx_crc_errors =
b6016b76
MC
6234 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6235
6236 net_stats->rx_errors = net_stats->rx_length_errors +
6237 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6238 net_stats->rx_crc_errors;
6239
6240 net_stats->tx_aborted_errors =
6241 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6242 stats_blk->stat_Dot3StatsLateCollisions);
6243
5b0c76ad
MC
6244 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6245 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
6246 net_stats->tx_carrier_errors = 0;
6247 else {
6248 net_stats->tx_carrier_errors =
6249 (unsigned long)
6250 stats_blk->stat_Dot3StatsCarrierSenseErrors;
6251 }
6252
6253 net_stats->tx_errors =
6aa20a22 6254 (unsigned long)
b6016b76
MC
6255 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6256 +
6257 net_stats->tx_aborted_errors +
6258 net_stats->tx_carrier_errors;
6259
cea94db9
MC
6260 net_stats->rx_missed_errors =
6261 (unsigned long) (stats_blk->stat_IfInMBUFDiscards +
6262 stats_blk->stat_FwRxDrop);
6263
b6016b76
MC
6264 return net_stats;
6265}
6266
6267/* All ethtool functions called with rtnl_lock */
6268
6269static int
6270bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6271{
972ec0d4 6272 struct bnx2 *bp = netdev_priv(dev);
7b6b8347 6273 int support_serdes = 0, support_copper = 0;
b6016b76
MC
6274
6275 cmd->supported = SUPPORTED_Autoneg;
583c28e5 6276 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
6277 support_serdes = 1;
6278 support_copper = 1;
6279 } else if (bp->phy_port == PORT_FIBRE)
6280 support_serdes = 1;
6281 else
6282 support_copper = 1;
6283
6284 if (support_serdes) {
b6016b76
MC
6285 cmd->supported |= SUPPORTED_1000baseT_Full |
6286 SUPPORTED_FIBRE;
583c28e5 6287 if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
605a9e20 6288 cmd->supported |= SUPPORTED_2500baseX_Full;
b6016b76 6289
b6016b76 6290 }
7b6b8347 6291 if (support_copper) {
b6016b76
MC
6292 cmd->supported |= SUPPORTED_10baseT_Half |
6293 SUPPORTED_10baseT_Full |
6294 SUPPORTED_100baseT_Half |
6295 SUPPORTED_100baseT_Full |
6296 SUPPORTED_1000baseT_Full |
6297 SUPPORTED_TP;
6298
b6016b76
MC
6299 }
6300
7b6b8347
MC
6301 spin_lock_bh(&bp->phy_lock);
6302 cmd->port = bp->phy_port;
b6016b76
MC
6303 cmd->advertising = bp->advertising;
6304
6305 if (bp->autoneg & AUTONEG_SPEED) {
6306 cmd->autoneg = AUTONEG_ENABLE;
6307 }
6308 else {
6309 cmd->autoneg = AUTONEG_DISABLE;
6310 }
6311
6312 if (netif_carrier_ok(dev)) {
6313 cmd->speed = bp->line_speed;
6314 cmd->duplex = bp->duplex;
6315 }
6316 else {
6317 cmd->speed = -1;
6318 cmd->duplex = -1;
6319 }
7b6b8347 6320 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6321
6322 cmd->transceiver = XCVR_INTERNAL;
6323 cmd->phy_address = bp->phy_addr;
6324
6325 return 0;
6326}
6aa20a22 6327
b6016b76
MC
6328static int
6329bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6330{
972ec0d4 6331 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6332 u8 autoneg = bp->autoneg;
6333 u8 req_duplex = bp->req_duplex;
6334 u16 req_line_speed = bp->req_line_speed;
6335 u32 advertising = bp->advertising;
7b6b8347
MC
6336 int err = -EINVAL;
6337
6338 spin_lock_bh(&bp->phy_lock);
6339
6340 if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
6341 goto err_out_unlock;
6342
583c28e5
MC
6343 if (cmd->port != bp->phy_port &&
6344 !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
7b6b8347 6345 goto err_out_unlock;
b6016b76 6346
d6b14486
MC
6347 /* If device is down, we can store the settings only if the user
6348 * is setting the currently active port.
6349 */
6350 if (!netif_running(dev) && cmd->port != bp->phy_port)
6351 goto err_out_unlock;
6352
b6016b76
MC
6353 if (cmd->autoneg == AUTONEG_ENABLE) {
6354 autoneg |= AUTONEG_SPEED;
6355
6aa20a22 6356 cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
6357
6358 /* allow advertising 1 speed */
6359 if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
6360 (cmd->advertising == ADVERTISED_10baseT_Full) ||
6361 (cmd->advertising == ADVERTISED_100baseT_Half) ||
6362 (cmd->advertising == ADVERTISED_100baseT_Full)) {
6363
7b6b8347
MC
6364 if (cmd->port == PORT_FIBRE)
6365 goto err_out_unlock;
b6016b76
MC
6366
6367 advertising = cmd->advertising;
6368
27a005b8 6369 } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
583c28e5 6370 if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
7b6b8347
MC
6371 (cmd->port == PORT_TP))
6372 goto err_out_unlock;
6373 } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
b6016b76 6374 advertising = cmd->advertising;
7b6b8347
MC
6375 else if (cmd->advertising == ADVERTISED_1000baseT_Half)
6376 goto err_out_unlock;
b6016b76 6377 else {
7b6b8347 6378 if (cmd->port == PORT_FIBRE)
b6016b76 6379 advertising = ETHTOOL_ALL_FIBRE_SPEED;
7b6b8347 6380 else
b6016b76 6381 advertising = ETHTOOL_ALL_COPPER_SPEED;
b6016b76
MC
6382 }
6383 advertising |= ADVERTISED_Autoneg;
6384 }
6385 else {
7b6b8347 6386 if (cmd->port == PORT_FIBRE) {
80be4434
MC
6387 if ((cmd->speed != SPEED_1000 &&
6388 cmd->speed != SPEED_2500) ||
6389 (cmd->duplex != DUPLEX_FULL))
7b6b8347 6390 goto err_out_unlock;
80be4434
MC
6391
6392 if (cmd->speed == SPEED_2500 &&
583c28e5 6393 !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
7b6b8347 6394 goto err_out_unlock;
b6016b76 6395 }
7b6b8347
MC
6396 else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
6397 goto err_out_unlock;
6398
b6016b76
MC
6399 autoneg &= ~AUTONEG_SPEED;
6400 req_line_speed = cmd->speed;
6401 req_duplex = cmd->duplex;
6402 advertising = 0;
6403 }
6404
6405 bp->autoneg = autoneg;
6406 bp->advertising = advertising;
6407 bp->req_line_speed = req_line_speed;
6408 bp->req_duplex = req_duplex;
6409
d6b14486
MC
6410 err = 0;
6411 /* If device is down, the new settings will be picked up when it is
6412 * brought up.
6413 */
6414 if (netif_running(dev))
6415 err = bnx2_setup_phy(bp, cmd->port);
b6016b76 6416
7b6b8347 6417err_out_unlock:
c770a65c 6418 spin_unlock_bh(&bp->phy_lock);
b6016b76 6419
7b6b8347 6420 return err;
b6016b76
MC
6421}
6422
6423static void
6424bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6425{
972ec0d4 6426 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6427
6428 strcpy(info->driver, DRV_MODULE_NAME);
6429 strcpy(info->version, DRV_MODULE_VERSION);
6430 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 6431 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
6432}
6433
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool .get_regs_len: size in bytes of the buffer that
 * bnx2_get_regs() fills.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6441
6442static void
6443bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6444{
6445 u32 *p = _p, i, offset;
6446 u8 *orig_p = _p;
6447 struct bnx2 *bp = netdev_priv(dev);
6448 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6449 0x0800, 0x0880, 0x0c00, 0x0c10,
6450 0x0c30, 0x0d08, 0x1000, 0x101c,
6451 0x1040, 0x1048, 0x1080, 0x10a4,
6452 0x1400, 0x1490, 0x1498, 0x14f0,
6453 0x1500, 0x155c, 0x1580, 0x15dc,
6454 0x1600, 0x1658, 0x1680, 0x16d8,
6455 0x1800, 0x1820, 0x1840, 0x1854,
6456 0x1880, 0x1894, 0x1900, 0x1984,
6457 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6458 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6459 0x2000, 0x2030, 0x23c0, 0x2400,
6460 0x2800, 0x2820, 0x2830, 0x2850,
6461 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6462 0x3c00, 0x3c94, 0x4000, 0x4010,
6463 0x4080, 0x4090, 0x43c0, 0x4458,
6464 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6465 0x4fc0, 0x5010, 0x53c0, 0x5444,
6466 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6467 0x5fc0, 0x6000, 0x6400, 0x6428,
6468 0x6800, 0x6848, 0x684c, 0x6860,
6469 0x6888, 0x6910, 0x8000 };
6470
6471 regs->version = 0;
6472
6473 memset(p, 0, BNX2_REGDUMP_LEN);
6474
6475 if (!netif_running(bp->dev))
6476 return;
6477
6478 i = 0;
6479 offset = reg_boundaries[0];
6480 p += offset;
6481 while (offset < BNX2_REGDUMP_LEN) {
6482 *p++ = REG_RD(bp, offset);
6483 offset += 4;
6484 if (offset == reg_boundaries[i + 1]) {
6485 offset = reg_boundaries[i + 2];
6486 p = (u32 *) (orig_p + offset);
6487 i += 2;
6488 }
6489 }
6490}
6491
b6016b76
MC
6492static void
6493bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6494{
972ec0d4 6495 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6496
f86e82fb 6497 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6498 wol->supported = 0;
6499 wol->wolopts = 0;
6500 }
6501 else {
6502 wol->supported = WAKE_MAGIC;
6503 if (bp->wol)
6504 wol->wolopts = WAKE_MAGIC;
6505 else
6506 wol->wolopts = 0;
6507 }
6508 memset(&wol->sopass, 0, sizeof(wol->sopass));
6509}
6510
6511static int
6512bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6513{
972ec0d4 6514 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6515
6516 if (wol->wolopts & ~WAKE_MAGIC)
6517 return -EINVAL;
6518
6519 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6520 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6521 return -EINVAL;
6522
6523 bp->wol = 1;
6524 }
6525 else {
6526 bp->wol = 0;
6527 }
6528 return 0;
6529}
6530
6531static int
6532bnx2_nway_reset(struct net_device *dev)
6533{
972ec0d4 6534 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6535 u32 bmcr;
6536
9f52b564
MC
6537 if (!netif_running(dev))
6538 return -EAGAIN;
6539
b6016b76
MC
6540 if (!(bp->autoneg & AUTONEG_SPEED)) {
6541 return -EINVAL;
6542 }
6543
c770a65c 6544 spin_lock_bh(&bp->phy_lock);
b6016b76 6545
583c28e5 6546 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
7b6b8347
MC
6547 int rc;
6548
6549 rc = bnx2_setup_remote_phy(bp, bp->phy_port);
6550 spin_unlock_bh(&bp->phy_lock);
6551 return rc;
6552 }
6553
b6016b76 6554 /* Force a link down visible on the other side */
583c28e5 6555 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
ca58c3af 6556 bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
c770a65c 6557 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6558
6559 msleep(20);
6560
c770a65c 6561 spin_lock_bh(&bp->phy_lock);
f8dd064e 6562
40105c0b 6563 bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
f8dd064e
MC
6564 bp->serdes_an_pending = 1;
6565 mod_timer(&bp->timer, jiffies + bp->current_interval);
b6016b76
MC
6566 }
6567
ca58c3af 6568 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
b6016b76 6569 bmcr &= ~BMCR_LOOPBACK;
ca58c3af 6570 bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);
b6016b76 6571
c770a65c 6572 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
6573
6574 return 0;
6575}
6576
6577static int
6578bnx2_get_eeprom_len(struct net_device *dev)
6579{
972ec0d4 6580 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6581
1122db71 6582 if (bp->flash_info == NULL)
b6016b76
MC
6583 return 0;
6584
1122db71 6585 return (int) bp->flash_size;
b6016b76
MC
6586}
6587
6588static int
6589bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6590 u8 *eebuf)
6591{
972ec0d4 6592 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6593 int rc;
6594
9f52b564
MC
6595 if (!netif_running(dev))
6596 return -EAGAIN;
6597
1064e944 6598 /* parameters already validated in ethtool_get_eeprom */
b6016b76
MC
6599
6600 rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6601
6602 return rc;
6603}
6604
6605static int
6606bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6607 u8 *eebuf)
6608{
972ec0d4 6609 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6610 int rc;
6611
9f52b564
MC
6612 if (!netif_running(dev))
6613 return -EAGAIN;
6614
1064e944 6615 /* parameters already validated in ethtool_set_eeprom */
b6016b76
MC
6616
6617 rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6618
6619 return rc;
6620}
6621
6622static int
6623bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6624{
972ec0d4 6625 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6626
6627 memset(coal, 0, sizeof(struct ethtool_coalesce));
6628
6629 coal->rx_coalesce_usecs = bp->rx_ticks;
6630 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6631 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6632 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6633
6634 coal->tx_coalesce_usecs = bp->tx_ticks;
6635 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6636 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6637 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6638
6639 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6640
6641 return 0;
6642}
6643
6644static int
6645bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6646{
972ec0d4 6647 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6648
6649 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6650 if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6651
6aa20a22 6652 bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
b6016b76
MC
6653 if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6654
6655 bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6656 if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6657
6658 bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6659 if (bp->rx_quick_cons_trip_int > 0xff)
6660 bp->rx_quick_cons_trip_int = 0xff;
6661
6662 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6663 if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6664
6665 bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6666 if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6667
6668 bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6669 if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6670
6671 bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6672 if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6673 0xff;
6674
6675 bp->stats_ticks = coal->stats_block_coalesce_usecs;
02537b06
MC
6676 if (CHIP_NUM(bp) == CHIP_NUM_5708) {
6677 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6678 bp->stats_ticks = USEC_PER_SEC;
6679 }
7ea6920e
MC
6680 if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6681 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6682 bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76
MC
6683
6684 if (netif_running(bp->dev)) {
6685 bnx2_netif_stop(bp);
9a120bc5 6686 bnx2_init_nic(bp, 0);
b6016b76
MC
6687 bnx2_netif_start(bp);
6688 }
6689
6690 return 0;
6691}
6692
6693static void
6694bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6695{
972ec0d4 6696 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6697
13daffa2 6698 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 6699 ering->rx_mini_max_pending = 0;
47bf4246 6700 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
6701
6702 ering->rx_pending = bp->rx_ring_size;
6703 ering->rx_mini_pending = 0;
47bf4246 6704 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
6705
6706 ering->tx_max_pending = MAX_TX_DESC_CNT;
6707 ering->tx_pending = bp->tx_ring_size;
6708}
6709
6710static int
5d5d0015 6711bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
b6016b76 6712{
13daffa2
MC
6713 if (netif_running(bp->dev)) {
6714 bnx2_netif_stop(bp);
6715 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
6716 bnx2_free_skbs(bp);
6717 bnx2_free_mem(bp);
6718 }
6719
5d5d0015
MC
6720 bnx2_set_rx_ring_size(bp, rx);
6721 bp->tx_ring_size = tx;
b6016b76
MC
6722
6723 if (netif_running(bp->dev)) {
13daffa2
MC
6724 int rc;
6725
6726 rc = bnx2_alloc_mem(bp);
6727 if (rc)
6728 return rc;
9a120bc5 6729 bnx2_init_nic(bp, 0);
b6016b76
MC
6730 bnx2_netif_start(bp);
6731 }
b6016b76
MC
6732 return 0;
6733}
6734
5d5d0015
MC
6735static int
6736bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6737{
6738 struct bnx2 *bp = netdev_priv(dev);
6739 int rc;
6740
6741 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
6742 (ering->tx_pending > MAX_TX_DESC_CNT) ||
6743 (ering->tx_pending <= MAX_SKB_FRAGS)) {
6744
6745 return -EINVAL;
6746 }
6747 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
6748 return rc;
6749}
6750
b6016b76
MC
6751static void
6752bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6753{
972ec0d4 6754 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6755
6756 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
6757 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
6758 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
6759}
6760
6761static int
6762bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6763{
972ec0d4 6764 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6765
6766 bp->req_flow_ctrl = 0;
6767 if (epause->rx_pause)
6768 bp->req_flow_ctrl |= FLOW_CTRL_RX;
6769 if (epause->tx_pause)
6770 bp->req_flow_ctrl |= FLOW_CTRL_TX;
6771
6772 if (epause->autoneg) {
6773 bp->autoneg |= AUTONEG_FLOW_CTRL;
6774 }
6775 else {
6776 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
6777 }
6778
9f52b564
MC
6779 if (netif_running(dev)) {
6780 spin_lock_bh(&bp->phy_lock);
6781 bnx2_setup_phy(bp, bp->phy_port);
6782 spin_unlock_bh(&bp->phy_lock);
6783 }
b6016b76
MC
6784
6785 return 0;
6786}
6787
6788static u32
6789bnx2_get_rx_csum(struct net_device *dev)
6790{
972ec0d4 6791 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6792
6793 return bp->rx_csum;
6794}
6795
6796static int
6797bnx2_set_rx_csum(struct net_device *dev, u32 data)
6798{
972ec0d4 6799 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6800
6801 bp->rx_csum = data;
6802 return 0;
6803}
6804
b11d6213
MC
6805static int
6806bnx2_set_tso(struct net_device *dev, u32 data)
6807{
4666f87a
MC
6808 struct bnx2 *bp = netdev_priv(dev);
6809
6810 if (data) {
b11d6213 6811 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
4666f87a
MC
6812 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6813 dev->features |= NETIF_F_TSO6;
6814 } else
6815 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
6816 NETIF_F_TSO_ECN);
b11d6213
MC
6817 return 0;
6818}
6819
cea94db9 6820#define BNX2_NUM_STATS 46
b6016b76 6821
14ab9b86 6822static struct {
b6016b76
MC
6823 char string[ETH_GSTRING_LEN];
6824} bnx2_stats_str_arr[BNX2_NUM_STATS] = {
6825 { "rx_bytes" },
6826 { "rx_error_bytes" },
6827 { "tx_bytes" },
6828 { "tx_error_bytes" },
6829 { "rx_ucast_packets" },
6830 { "rx_mcast_packets" },
6831 { "rx_bcast_packets" },
6832 { "tx_ucast_packets" },
6833 { "tx_mcast_packets" },
6834 { "tx_bcast_packets" },
6835 { "tx_mac_errors" },
6836 { "tx_carrier_errors" },
6837 { "rx_crc_errors" },
6838 { "rx_align_errors" },
6839 { "tx_single_collisions" },
6840 { "tx_multi_collisions" },
6841 { "tx_deferred" },
6842 { "tx_excess_collisions" },
6843 { "tx_late_collisions" },
6844 { "tx_total_collisions" },
6845 { "rx_fragments" },
6846 { "rx_jabbers" },
6847 { "rx_undersize_packets" },
6848 { "rx_oversize_packets" },
6849 { "rx_64_byte_packets" },
6850 { "rx_65_to_127_byte_packets" },
6851 { "rx_128_to_255_byte_packets" },
6852 { "rx_256_to_511_byte_packets" },
6853 { "rx_512_to_1023_byte_packets" },
6854 { "rx_1024_to_1522_byte_packets" },
6855 { "rx_1523_to_9022_byte_packets" },
6856 { "tx_64_byte_packets" },
6857 { "tx_65_to_127_byte_packets" },
6858 { "tx_128_to_255_byte_packets" },
6859 { "tx_256_to_511_byte_packets" },
6860 { "tx_512_to_1023_byte_packets" },
6861 { "tx_1024_to_1522_byte_packets" },
6862 { "tx_1523_to_9022_byte_packets" },
6863 { "rx_xon_frames" },
6864 { "rx_xoff_frames" },
6865 { "tx_xon_frames" },
6866 { "tx_xoff_frames" },
6867 { "rx_mac_ctrl_frames" },
6868 { "rx_filtered_packets" },
6869 { "rx_discards" },
cea94db9 6870 { "rx_fw_discards" },
b6016b76
MC
6871};
6872
6873#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
6874
f71e1309 6875static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
6876 STATS_OFFSET32(stat_IfHCInOctets_hi),
6877 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6878 STATS_OFFSET32(stat_IfHCOutOctets_hi),
6879 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6880 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
6881 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
6882 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
6883 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
6884 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
6885 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
6886 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
6887 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6888 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
6889 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
6890 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
6891 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
6892 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6893 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
6894 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
6895 STATS_OFFSET32(stat_EtherStatsCollisions),
6896 STATS_OFFSET32(stat_EtherStatsFragments),
6897 STATS_OFFSET32(stat_EtherStatsJabbers),
6898 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
6899 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
6900 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
6901 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
6902 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
6903 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
6904 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
6905 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
6906 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
6907 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
6908 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
6909 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
6910 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
6911 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
6912 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
6913 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
6914 STATS_OFFSET32(stat_XonPauseFramesReceived),
6915 STATS_OFFSET32(stat_XoffPauseFramesReceived),
6916 STATS_OFFSET32(stat_OutXonSent),
6917 STATS_OFFSET32(stat_OutXoffSent),
6918 STATS_OFFSET32(stat_MacControlFramesReceived),
6919 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
6920 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 6921 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
6922};
6923
6924/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
6925 * skipped because of errata.
6aa20a22 6926 */
14ab9b86 6927static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
6928 8,0,8,8,8,8,8,8,8,8,
6929 4,0,4,4,4,4,4,4,4,4,
6930 4,4,4,4,4,4,4,4,4,4,
6931 4,4,4,4,4,4,4,4,4,4,
cea94db9 6932 4,4,4,4,4,4,
b6016b76
MC
6933};
6934
5b0c76ad
MC
6935static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
6936 8,0,8,8,8,8,8,8,8,8,
6937 4,4,4,4,4,4,4,4,4,4,
6938 4,4,4,4,4,4,4,4,4,4,
6939 4,4,4,4,4,4,4,4,4,4,
cea94db9 6940 4,4,4,4,4,4,
5b0c76ad
MC
6941};
6942
b6016b76
MC
6943#define BNX2_NUM_TESTS 6
6944
14ab9b86 6945static struct {
b6016b76
MC
6946 char string[ETH_GSTRING_LEN];
6947} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
6948 { "register_test (offline)" },
6949 { "memory_test (offline)" },
6950 { "loopback_test (offline)" },
6951 { "nvram_test (online)" },
6952 { "interrupt_test (online)" },
6953 { "link_test (online)" },
6954};
6955
6956static int
b9f2c044 6957bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 6958{
b9f2c044
JG
6959 switch (sset) {
6960 case ETH_SS_TEST:
6961 return BNX2_NUM_TESTS;
6962 case ETH_SS_STATS:
6963 return BNX2_NUM_STATS;
6964 default:
6965 return -EOPNOTSUPP;
6966 }
b6016b76
MC
6967}
6968
6969static void
6970bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
6971{
972ec0d4 6972 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6973
9f52b564
MC
6974 bnx2_set_power_state(bp, PCI_D0);
6975
b6016b76
MC
6976 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
6977 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
6978 int i;
6979
b6016b76
MC
6980 bnx2_netif_stop(bp);
6981 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
6982 bnx2_free_skbs(bp);
6983
6984 if (bnx2_test_registers(bp) != 0) {
6985 buf[0] = 1;
6986 etest->flags |= ETH_TEST_FL_FAILED;
6987 }
6988 if (bnx2_test_memory(bp) != 0) {
6989 buf[1] = 1;
6990 etest->flags |= ETH_TEST_FL_FAILED;
6991 }
bc5a0690 6992 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 6993 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76 6994
9f52b564
MC
6995 if (!netif_running(bp->dev))
6996 bnx2_shutdown_chip(bp);
b6016b76 6997 else {
9a120bc5 6998 bnx2_init_nic(bp, 1);
b6016b76
MC
6999 bnx2_netif_start(bp);
7000 }
7001
7002 /* wait for link up */
80be4434
MC
7003 for (i = 0; i < 7; i++) {
7004 if (bp->link_up)
7005 break;
7006 msleep_interruptible(1000);
7007 }
b6016b76
MC
7008 }
7009
7010 if (bnx2_test_nvram(bp) != 0) {
7011 buf[3] = 1;
7012 etest->flags |= ETH_TEST_FL_FAILED;
7013 }
7014 if (bnx2_test_intr(bp) != 0) {
7015 buf[4] = 1;
7016 etest->flags |= ETH_TEST_FL_FAILED;
7017 }
7018
7019 if (bnx2_test_link(bp) != 0) {
7020 buf[5] = 1;
7021 etest->flags |= ETH_TEST_FL_FAILED;
7022
7023 }
9f52b564
MC
7024 if (!netif_running(bp->dev))
7025 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
7026}
7027
7028static void
7029bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7030{
7031 switch (stringset) {
7032 case ETH_SS_STATS:
7033 memcpy(buf, bnx2_stats_str_arr,
7034 sizeof(bnx2_stats_str_arr));
7035 break;
7036 case ETH_SS_TEST:
7037 memcpy(buf, bnx2_tests_str_arr,
7038 sizeof(bnx2_tests_str_arr));
7039 break;
7040 }
7041}
7042
b6016b76
MC
7043static void
7044bnx2_get_ethtool_stats(struct net_device *dev,
7045 struct ethtool_stats *stats, u64 *buf)
7046{
972ec0d4 7047 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7048 int i;
7049 u32 *hw_stats = (u32 *) bp->stats_blk;
14ab9b86 7050 u8 *stats_len_arr = NULL;
b6016b76
MC
7051
7052 if (hw_stats == NULL) {
7053 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7054 return;
7055 }
7056
5b0c76ad
MC
7057 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7058 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7059 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7060 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 7061 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
7062 else
7063 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
7064
7065 for (i = 0; i < BNX2_NUM_STATS; i++) {
7066 if (stats_len_arr[i] == 0) {
7067 /* skip this counter */
7068 buf[i] = 0;
7069 continue;
7070 }
7071 if (stats_len_arr[i] == 4) {
7072 /* 4-byte counter */
7073 buf[i] = (u64)
7074 *(hw_stats + bnx2_stats_offset_arr[i]);
7075 continue;
7076 }
7077 /* 8-byte counter */
7078 buf[i] = (((u64) *(hw_stats +
7079 bnx2_stats_offset_arr[i])) << 32) +
7080 *(hw_stats + bnx2_stats_offset_arr[i] + 1);
7081 }
7082}
7083
7084static int
7085bnx2_phys_id(struct net_device *dev, u32 data)
7086{
972ec0d4 7087 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7088 int i;
7089 u32 save;
7090
9f52b564
MC
7091 bnx2_set_power_state(bp, PCI_D0);
7092
b6016b76
MC
7093 if (data == 0)
7094 data = 2;
7095
7096 save = REG_RD(bp, BNX2_MISC_CFG);
7097 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7098
7099 for (i = 0; i < (data * 2); i++) {
7100 if ((i % 2) == 0) {
7101 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7102 }
7103 else {
7104 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7105 BNX2_EMAC_LED_1000MB_OVERRIDE |
7106 BNX2_EMAC_LED_100MB_OVERRIDE |
7107 BNX2_EMAC_LED_10MB_OVERRIDE |
7108 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7109 BNX2_EMAC_LED_TRAFFIC);
7110 }
7111 msleep_interruptible(500);
7112 if (signal_pending(current))
7113 break;
7114 }
7115 REG_WR(bp, BNX2_EMAC_LED, 0);
7116 REG_WR(bp, BNX2_MISC_CFG, save);
9f52b564
MC
7117
7118 if (!netif_running(dev))
7119 bnx2_set_power_state(bp, PCI_D3hot);
7120
b6016b76
MC
7121 return 0;
7122}
7123
4666f87a
MC
7124static int
7125bnx2_set_tx_csum(struct net_device *dev, u32 data)
7126{
7127 struct bnx2 *bp = netdev_priv(dev);
7128
7129 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 7130 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
7131 else
7132 return (ethtool_op_set_tx_csum(dev, data));
7133}
7134
7282d491 7135static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
7136 .get_settings = bnx2_get_settings,
7137 .set_settings = bnx2_set_settings,
7138 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
7139 .get_regs_len = bnx2_get_regs_len,
7140 .get_regs = bnx2_get_regs,
b6016b76
MC
7141 .get_wol = bnx2_get_wol,
7142 .set_wol = bnx2_set_wol,
7143 .nway_reset = bnx2_nway_reset,
7144 .get_link = ethtool_op_get_link,
7145 .get_eeprom_len = bnx2_get_eeprom_len,
7146 .get_eeprom = bnx2_get_eeprom,
7147 .set_eeprom = bnx2_set_eeprom,
7148 .get_coalesce = bnx2_get_coalesce,
7149 .set_coalesce = bnx2_set_coalesce,
7150 .get_ringparam = bnx2_get_ringparam,
7151 .set_ringparam = bnx2_set_ringparam,
7152 .get_pauseparam = bnx2_get_pauseparam,
7153 .set_pauseparam = bnx2_set_pauseparam,
7154 .get_rx_csum = bnx2_get_rx_csum,
7155 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 7156 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 7157 .set_sg = ethtool_op_set_sg,
b11d6213 7158 .set_tso = bnx2_set_tso,
b6016b76
MC
7159 .self_test = bnx2_self_test,
7160 .get_strings = bnx2_get_strings,
7161 .phys_id = bnx2_phys_id,
b6016b76 7162 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 7163 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
7164};
7165
7166/* Called with rtnl_lock */
7167static int
7168bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7169{
14ab9b86 7170 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 7171 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7172 int err;
7173
7174 switch(cmd) {
7175 case SIOCGMIIPHY:
7176 data->phy_id = bp->phy_addr;
7177
7178 /* fallthru */
7179 case SIOCGMIIREG: {
7180 u32 mii_regval;
7181
583c28e5 7182 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7183 return -EOPNOTSUPP;
7184
dad3e452
MC
7185 if (!netif_running(dev))
7186 return -EAGAIN;
7187
c770a65c 7188 spin_lock_bh(&bp->phy_lock);
b6016b76 7189 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 7190 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7191
7192 data->val_out = mii_regval;
7193
7194 return err;
7195 }
7196
7197 case SIOCSMIIREG:
7198 if (!capable(CAP_NET_ADMIN))
7199 return -EPERM;
7200
583c28e5 7201 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7202 return -EOPNOTSUPP;
7203
dad3e452
MC
7204 if (!netif_running(dev))
7205 return -EAGAIN;
7206
c770a65c 7207 spin_lock_bh(&bp->phy_lock);
b6016b76 7208 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 7209 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7210
7211 return err;
7212
7213 default:
7214 /* do nothing */
7215 break;
7216 }
7217 return -EOPNOTSUPP;
7218}
7219
7220/* Called with rtnl_lock */
7221static int
7222bnx2_change_mac_addr(struct net_device *dev, void *p)
7223{
7224 struct sockaddr *addr = p;
972ec0d4 7225 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7226
73eef4cd
MC
7227 if (!is_valid_ether_addr(addr->sa_data))
7228 return -EINVAL;
7229
b6016b76
MC
7230 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7231 if (netif_running(dev))
5fcaed01 7232 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7233
7234 return 0;
7235}
7236
7237/* Called with rtnl_lock */
7238static int
7239bnx2_change_mtu(struct net_device *dev, int new_mtu)
7240{
972ec0d4 7241 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7242
7243 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7244 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7245 return -EINVAL;
7246
7247 dev->mtu = new_mtu;
5d5d0015 7248 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7249}
7250
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
/* netpoll hook: service every interrupt vector with its IRQ disabled. */
static void
poll_bnx2(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;

	for (i = 0; i < bp->irq_nvecs; i++) {
		int vec = bp->irq_tbl[i].vector;

		disable_irq(vec);
		bnx2_interrupt(vec, &bp->bnx2_napi[i]);
		enable_irq(vec);
	}
}
#endif
7265
/* Determine whether a 5709 port is SerDes (fibre) or copper and set
 * BNX2_PHY_FLAG_SERDES accordingly.  The media is derived from the
 * bond id in the dual-media control register; if the bond id is not
 * conclusive, the PHY strap value is decoded per PCI function.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id "C" means copper; nothing to flag. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Software override takes precedence over the hardware strap. */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap values that indicate SerDes differ between PCI
	 * function 0 and function 1 (values per Broadcom hardware
	 * documentation); any other value is treated as copper.
	 */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7303
/* Probe the PCI/PCI-X bus characteristics from the chip's misc status
 * register and record them in bp->flags (BNX2_FLAG_PCIX,
 * BNX2_FLAG_PCI_32BIT) and bp->bus_speed_mhz.  Only meaningful for
 * non-PCIE devices; the caller skips this for PCIE.
 */
static void __devinit
bnx2_get_pci_speed(struct bnx2 *bp)
{
	u32 reg;

	reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
	if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
		u32 clkreg;

		bp->flags |= BNX2_FLAG_PCIX;

		/* In PCI-X mode the detected clock speed is encoded in
		 * the clock control register; map it to MHz.
		 */
		clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);

		clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
		switch (clkreg) {
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
			bp->bus_speed_mhz = 133;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
			bp->bus_speed_mhz = 100;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
			bp->bus_speed_mhz = 66;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
			bp->bus_speed_mhz = 50;
			break;

		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
		case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
			bp->bus_speed_mhz = 33;
			break;
		}
	}
	else {
		/* Plain PCI: speed is indicated by the M66EN pin only. */
		if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
			bp->bus_speed_mhz = 66;
		else
			bp->bus_speed_mhz = 33;
	}

	if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
		bp->flags |= BNX2_FLAG_PCI_32BIT;

}
7355
b6016b76
MC
7356static int __devinit
7357bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7358{
7359 struct bnx2 *bp;
7360 unsigned long mem_len;
58fc2ea4 7361 int rc, i, j;
b6016b76 7362 u32 reg;
40453c83 7363 u64 dma_mask, persist_dma_mask;
b6016b76 7364
b6016b76 7365 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7366 bp = netdev_priv(dev);
b6016b76
MC
7367
7368 bp->flags = 0;
7369 bp->phy_flags = 0;
7370
7371 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7372 rc = pci_enable_device(pdev);
7373 if (rc) {
898eb71c 7374 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
b6016b76
MC
7375 goto err_out;
7376 }
7377
7378 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7379 dev_err(&pdev->dev,
2e8a538d 7380 "Cannot find PCI device base address, aborting.\n");
b6016b76
MC
7381 rc = -ENODEV;
7382 goto err_out_disable;
7383 }
7384
7385 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7386 if (rc) {
9b91cf9d 7387 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
b6016b76
MC
7388 goto err_out_disable;
7389 }
7390
7391 pci_set_master(pdev);
6ff2da49 7392 pci_save_state(pdev);
b6016b76
MC
7393
7394 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7395 if (bp->pm_cap == 0) {
9b91cf9d 7396 dev_err(&pdev->dev,
2e8a538d 7397 "Cannot find power management capability, aborting.\n");
b6016b76
MC
7398 rc = -EIO;
7399 goto err_out_release;
7400 }
7401
b6016b76
MC
7402 bp->dev = dev;
7403 bp->pdev = pdev;
7404
7405 spin_lock_init(&bp->phy_lock);
1b8227c4 7406 spin_lock_init(&bp->indirect_lock);
c4028958 7407 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7408
7409 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
706bf240 7410 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS);
b6016b76
MC
7411 dev->mem_end = dev->mem_start + mem_len;
7412 dev->irq = pdev->irq;
7413
7414 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7415
7416 if (!bp->regview) {
9b91cf9d 7417 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
b6016b76
MC
7418 rc = -ENOMEM;
7419 goto err_out_release;
7420 }
7421
7422 /* Configure byte swap and enable write to the reg_window registers.
7423 * Rely on CPU to do target byte swapping on big endian systems
7424 * The chip's target access swapping will not swap all accesses
7425 */
7426 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7427 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7428 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7429
829ca9a3 7430 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7431
7432 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7433
883e5151
MC
7434 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7435 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7436 dev_err(&pdev->dev,
7437 "Cannot find PCIE capability, aborting.\n");
7438 rc = -EIO;
7439 goto err_out_unmap;
7440 }
f86e82fb 7441 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7442 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7443 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
883e5151 7444 } else {
59b47d8a
MC
7445 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7446 if (bp->pcix_cap == 0) {
7447 dev_err(&pdev->dev,
7448 "Cannot find PCIX capability, aborting.\n");
7449 rc = -EIO;
7450 goto err_out_unmap;
7451 }
7452 }
7453
b4b36042
MC
7454 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7455 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7456 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7457 }
7458
8e6a72c4
MC
7459 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7460 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7461 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7462 }
7463
40453c83
MC
7464 /* 5708 cannot support DMA addresses > 40-bit. */
7465 if (CHIP_NUM(bp) == CHIP_NUM_5708)
7466 persist_dma_mask = dma_mask = DMA_40BIT_MASK;
7467 else
7468 persist_dma_mask = dma_mask = DMA_64BIT_MASK;
7469
7470 /* Configure DMA attributes. */
7471 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7472 dev->features |= NETIF_F_HIGHDMA;
7473 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7474 if (rc) {
7475 dev_err(&pdev->dev,
7476 "pci_set_consistent_dma_mask failed, aborting.\n");
7477 goto err_out_unmap;
7478 }
7479 } else if ((rc = pci_set_dma_mask(pdev, DMA_32BIT_MASK)) != 0) {
7480 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7481 goto err_out_unmap;
7482 }
7483
f86e82fb 7484 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 7485 bnx2_get_pci_speed(bp);
b6016b76
MC
7486
7487 /* 5706A0 may falsely detect SERR and PERR. */
7488 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7489 reg = REG_RD(bp, PCI_COMMAND);
7490 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7491 REG_WR(bp, PCI_COMMAND, reg);
7492 }
7493 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 7494 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 7495
9b91cf9d 7496 dev_err(&pdev->dev,
2e8a538d 7497 "5706 A1 can only be used in a PCIX bus, aborting.\n");
b6016b76
MC
7498 goto err_out_unmap;
7499 }
7500
7501 bnx2_init_nvram(bp);
7502
2726d6e1 7503 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
7504
7505 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
7506 BNX2_SHM_HDR_SIGNATURE_SIG) {
7507 u32 off = PCI_FUNC(pdev->devfn) << 2;
7508
2726d6e1 7509 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 7510 } else
e3648b3d
MC
7511 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7512
b6016b76
MC
7513 /* Get the permanent MAC address. First we need to make sure the
7514 * firmware is actually running.
7515 */
2726d6e1 7516 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
7517
7518 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7519 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
9b91cf9d 7520 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
b6016b76
MC
7521 rc = -ENODEV;
7522 goto err_out_unmap;
7523 }
7524
2726d6e1 7525 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
58fc2ea4
MC
7526 for (i = 0, j = 0; i < 3; i++) {
7527 u8 num, k, skip0;
7528
7529 num = (u8) (reg >> (24 - (i * 8)));
7530 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7531 if (num >= k || !skip0 || k == 1) {
7532 bp->fw_version[j++] = (num / k) + '0';
7533 skip0 = 0;
7534 }
7535 }
7536 if (i != 2)
7537 bp->fw_version[j++] = '.';
7538 }
2726d6e1 7539 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
7540 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7541 bp->wol = 1;
7542
7543 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 7544 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
7545
7546 for (i = 0; i < 30; i++) {
2726d6e1 7547 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
7548 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7549 break;
7550 msleep(10);
7551 }
7552 }
2726d6e1 7553 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
7554 reg &= BNX2_CONDITION_MFW_RUN_MASK;
7555 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7556 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 7557 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4
MC
7558
7559 bp->fw_version[j++] = ' ';
7560 for (i = 0; i < 3; i++) {
2726d6e1 7561 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
58fc2ea4
MC
7562 reg = swab32(reg);
7563 memcpy(&bp->fw_version[j], &reg, 4);
7564 j += 4;
7565 }
7566 }
b6016b76 7567
2726d6e1 7568 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
7569 bp->mac_addr[0] = (u8) (reg >> 8);
7570 bp->mac_addr[1] = (u8) reg;
7571
2726d6e1 7572 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
7573 bp->mac_addr[2] = (u8) (reg >> 24);
7574 bp->mac_addr[3] = (u8) (reg >> 16);
7575 bp->mac_addr[4] = (u8) (reg >> 8);
7576 bp->mac_addr[5] = (u8) reg;
7577
7578 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 7579 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
7580
7581 bp->rx_csum = 1;
7582
b6016b76
MC
7583 bp->tx_quick_cons_trip_int = 20;
7584 bp->tx_quick_cons_trip = 20;
7585 bp->tx_ticks_int = 80;
7586 bp->tx_ticks = 80;
6aa20a22 7587
b6016b76
MC
7588 bp->rx_quick_cons_trip_int = 6;
7589 bp->rx_quick_cons_trip = 6;
7590 bp->rx_ticks_int = 18;
7591 bp->rx_ticks = 18;
7592
7ea6920e 7593 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 7594
ac392abc 7595 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 7596
5b0c76ad
MC
7597 bp->phy_addr = 1;
7598
b6016b76 7599 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
7600 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7601 bnx2_get_5709_media(bp);
7602 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 7603 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 7604
0d8a6571 7605 bp->phy_port = PORT_TP;
583c28e5 7606 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 7607 bp->phy_port = PORT_FIBRE;
2726d6e1 7608 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 7609 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 7610 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7611 bp->wol = 0;
7612 }
38ea3686
MC
7613 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7614 /* Don't do parallel detect on this board because of
7615 * some board problems. The link will not go down
7616 * if we do parallel detect.
7617 */
7618 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7619 pdev->subsystem_device == 0x310c)
7620 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7621 } else {
5b0c76ad 7622 bp->phy_addr = 2;
5b0c76ad 7623 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 7624 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 7625 }
261dd5ca
MC
7626 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7627 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 7628 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
7629 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7630 (CHIP_REV(bp) == CHIP_REV_Ax ||
7631 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 7632 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 7633
7c62e83b
MC
7634 bnx2_init_fw_cap(bp);
7635
16088272
MC
7636 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7637 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
7638 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7639 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 7640 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
7641 bp->wol = 0;
7642 }
dda1e390 7643
b6016b76
MC
7644 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7645 bp->tx_quick_cons_trip_int =
7646 bp->tx_quick_cons_trip;
7647 bp->tx_ticks_int = bp->tx_ticks;
7648 bp->rx_quick_cons_trip_int =
7649 bp->rx_quick_cons_trip;
7650 bp->rx_ticks_int = bp->rx_ticks;
7651 bp->comp_prod_trip_int = bp->comp_prod_trip;
7652 bp->com_ticks_int = bp->com_ticks;
7653 bp->cmd_ticks_int = bp->cmd_ticks;
7654 }
7655
f9317a40
MC
7656 /* Disable MSI on 5706 if AMD 8132 bridge is found.
7657 *
7658 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
7659 * with byte enables disabled on the unused 32-bit word. This is legal
7660 * but causes problems on the AMD 8132 which will eventually stop
7661 * responding after a while.
7662 *
7663 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 7664 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
7665 */
7666 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7667 struct pci_dev *amd_8132 = NULL;
7668
7669 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7670 PCI_DEVICE_ID_AMD_8132_BRIDGE,
7671 amd_8132))) {
f9317a40 7672
44c10138
AK
7673 if (amd_8132->revision >= 0x10 &&
7674 amd_8132->revision <= 0x13) {
f9317a40
MC
7675 disable_msi = 1;
7676 pci_dev_put(amd_8132);
7677 break;
7678 }
7679 }
7680 }
7681
deaf391b 7682 bnx2_set_default_link(bp);
b6016b76
MC
7683 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7684
cd339a0e 7685 init_timer(&bp->timer);
ac392abc 7686 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
7687 bp->timer.data = (unsigned long) bp;
7688 bp->timer.function = bnx2_timer;
7689
b6016b76
MC
7690 return 0;
7691
7692err_out_unmap:
7693 if (bp->regview) {
7694 iounmap(bp->regview);
73eef4cd 7695 bp->regview = NULL;
b6016b76
MC
7696 }
7697
7698err_out_release:
7699 pci_release_regions(pdev);
7700
7701err_out_disable:
7702 pci_disable_device(pdev);
7703 pci_set_drvdata(pdev, NULL);
7704
7705err_out:
7706 return rc;
7707}
7708
883e5151
MC
7709static char * __devinit
7710bnx2_bus_string(struct bnx2 *bp, char *str)
7711{
7712 char *s = str;
7713
f86e82fb 7714 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
7715 s += sprintf(s, "PCI Express");
7716 } else {
7717 s += sprintf(s, "PCI");
f86e82fb 7718 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 7719 s += sprintf(s, "-X");
f86e82fb 7720 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
7721 s += sprintf(s, " 32-bit");
7722 else
7723 s += sprintf(s, " 64-bit");
7724 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
7725 }
7726 return str;
7727}
7728
2ba582b7 7729static void __devinit
35efa7c1
MC
7730bnx2_init_napi(struct bnx2 *bp)
7731{
b4b36042 7732 int i;
35efa7c1 7733
b4b36042 7734 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
35e9010b
MC
7735 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
7736 int (*poll)(struct napi_struct *, int);
7737
7738 if (i == 0)
7739 poll = bnx2_poll;
7740 else
f0ea2e63 7741 poll = bnx2_poll_msix;
35e9010b
MC
7742
7743 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
7744 bnapi->bp = bp;
7745 }
35efa7c1
MC
7746}
7747
/* net_device callbacks for the bnx2 driver; wired up in bnx2_init_one(). */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
7766
/* PCI probe entry point.  Allocates the multi-queue net_device, runs the
 * board-level initialization, advertises offload features appropriate
 * for the chip, and registers the netdev.  On register failure the board
 * resources acquired by bnx2_init_board() are torn down here.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only for the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board() released its own resources already. */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	/* Permanent MAC was read from shared memory in bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	/* Only the 5709 supports IPv6 checksum offload and TSO6. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_IPV6_CSUM;

#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		dev->features |= NETIF_F_TSO6;

	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;
}
7837
/* PCI remove entry point.  Flushes any pending reset work before the
 * netdev goes away, then releases resources in the reverse order of
 * bnx2_init_board()/bnx2_init_one().
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure bnx2_reset_task() is not running against the device. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
7856
/* Legacy PM suspend hook: quiesce the interface, shut the chip down and
 * drop into the requested low-power state.  A non-running interface only
 * needs its config space saved.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Finish any queued reset work before tearing the device down. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
7880
/* Legacy PM resume hook: restore config space and, if the interface was
 * running at suspend time, power the chip back up and re-initialize it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
7897
/**
 * bnx2_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
					       pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	netif_device_detach(dev);

	if (netif_running(dev)) {
		bnx2_netif_stop(bp);
		del_timer_sync(&bp->timer);
		bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
	}

	pci_disable_device(pdev);
	rtnl_unlock();

	/* Request a slot reset. */
	return PCI_ERS_RESULT_NEED_RESET;
}
7927
/**
 * bnx2_io_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset.\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}
	pci_set_master(pdev);
	pci_restore_state(pdev);

	/* Only re-initialize the hardware if the interface was up. */
	if (netif_running(dev)) {
		bnx2_set_power_state(bp, PCI_D0);
		bnx2_init_nic(bp, 1);
	}

	rtnl_unlock();
	return PCI_ERS_RESULT_RECOVERED;
}
7957
/**
 * bnx2_io_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * its OK to resume normal operation.
 */
static void bnx2_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	rtnl_lock();
	if (netif_running(dev))
		bnx2_netif_start(bp);

	netif_device_attach(dev);
	rtnl_unlock();
}
7977
/* AER (PCI error recovery) callbacks registered via bnx2_pci_driver. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
7983
/* Top-level PCI driver descriptor tying together probe/remove, legacy
 * power management, and the error-recovery handlers.
 */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
7993
/* Module init: register the PCI driver (probe runs for present devices). */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
7998
/* Module exit: unregister the PCI driver (remove runs for bound devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8003
/* Standard kernel module entry/exit hooks. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);
8006
8007
8008