]> bbs.cooldavid.org Git - net-next-2.6.git/blame - drivers/net/bnx2.c
Merge branch 'master' of git://dev.medozas.de/linux
[net-next-2.6.git] / drivers / net / bnx2.c
CommitLineData
b6016b76
MC
1/* bnx2.c: Broadcom NX2 network driver.
2 *
bec92044 3 * Copyright (c) 2004-2010 Broadcom Corporation
b6016b76
MC
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License as published by
7 * the Free Software Foundation.
8 *
9 * Written by: Michael Chan (mchan@broadcom.com)
10 */
11
3a9c6a49 12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
f2a4f052
MC
13
14#include <linux/module.h>
15#include <linux/moduleparam.h>
16
17#include <linux/kernel.h>
18#include <linux/timer.h>
19#include <linux/errno.h>
20#include <linux/ioport.h>
21#include <linux/slab.h>
22#include <linux/vmalloc.h>
23#include <linux/interrupt.h>
24#include <linux/pci.h>
25#include <linux/init.h>
26#include <linux/netdevice.h>
27#include <linux/etherdevice.h>
28#include <linux/skbuff.h>
29#include <linux/dma-mapping.h>
1977f032 30#include <linux/bitops.h>
f2a4f052
MC
31#include <asm/io.h>
32#include <asm/irq.h>
33#include <linux/delay.h>
34#include <asm/byteorder.h>
c86a31f4 35#include <asm/page.h>
f2a4f052
MC
36#include <linux/time.h>
37#include <linux/ethtool.h>
38#include <linux/mii.h>
f2a4f052 39#include <linux/if_vlan.h>
08013fa3 40#if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
f2a4f052
MC
41#define BCM_VLAN 1
42#endif
f2a4f052 43#include <net/ip.h>
de081fa5 44#include <net/tcp.h>
f2a4f052 45#include <net/checksum.h>
f2a4f052
MC
46#include <linux/workqueue.h>
47#include <linux/crc32.h>
48#include <linux/prefetch.h>
29b12174 49#include <linux/cache.h>
57579f76 50#include <linux/firmware.h>
706bf240 51#include <linux/log2.h>
f2a4f052 52
4edd473f
MC
53#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54#define BCM_CNIC 1
55#include "cnic_if.h"
56#endif
b6016b76
MC
57#include "bnx2.h"
58#include "bnx2_fw.h"
b3448b0b 59
b6016b76 60#define DRV_MODULE_NAME "bnx2"
587611d6
MC
61#define DRV_MODULE_VERSION "2.0.9"
62#define DRV_MODULE_RELDATE "April 27, 2010"
bec92044 63#define FW_MIPS_FILE_06 "bnx2/bnx2-mips-06-5.0.0.j6.fw"
078b0735 64#define FW_RV2P_FILE_06 "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
bec92044
MC
65#define FW_MIPS_FILE_09 "bnx2/bnx2-mips-09-5.0.0.j9.fw"
66#define FW_RV2P_FILE_09_Ax "bnx2/bnx2-rv2p-09ax-5.0.0.j10.fw"
67#define FW_RV2P_FILE_09 "bnx2/bnx2-rv2p-09-5.0.0.j10.fw"
b6016b76
MC
68
69#define RUN_AT(x) (jiffies + (x))
70
71/* Time in jiffies before concluding the transmitter is hung. */
72#define TX_TIMEOUT (5*HZ)
73
fefa8645 74static char version[] __devinitdata =
b6016b76
MC
75 "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
76
77MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
453a9c6e 78MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
b6016b76
MC
79MODULE_LICENSE("GPL");
80MODULE_VERSION(DRV_MODULE_VERSION);
57579f76
MC
81MODULE_FIRMWARE(FW_MIPS_FILE_06);
82MODULE_FIRMWARE(FW_RV2P_FILE_06);
83MODULE_FIRMWARE(FW_MIPS_FILE_09);
84MODULE_FIRMWARE(FW_RV2P_FILE_09);
078b0735 85MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
b6016b76
MC
86
87static int disable_msi = 0;
88
89module_param(disable_msi, int, 0);
90MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
91
92typedef enum {
93 BCM5706 = 0,
94 NC370T,
95 NC370I,
96 BCM5706S,
97 NC370F,
5b0c76ad
MC
98 BCM5708,
99 BCM5708S,
bac0dff6 100 BCM5709,
27a005b8 101 BCM5709S,
7bb0a04f 102 BCM5716,
1caacecb 103 BCM5716S,
b6016b76
MC
104} board_t;
105
106/* indexed by board_t, above */
fefa8645 107static struct {
b6016b76
MC
108 char *name;
109} board_info[] __devinitdata = {
110 { "Broadcom NetXtreme II BCM5706 1000Base-T" },
111 { "HP NC370T Multifunction Gigabit Server Adapter" },
112 { "HP NC370i Multifunction Gigabit Server Adapter" },
113 { "Broadcom NetXtreme II BCM5706 1000Base-SX" },
114 { "HP NC370F Multifunction Gigabit Server Adapter" },
5b0c76ad
MC
115 { "Broadcom NetXtreme II BCM5708 1000Base-T" },
116 { "Broadcom NetXtreme II BCM5708 1000Base-SX" },
bac0dff6 117 { "Broadcom NetXtreme II BCM5709 1000Base-T" },
27a005b8 118 { "Broadcom NetXtreme II BCM5709 1000Base-SX" },
7bb0a04f 119 { "Broadcom NetXtreme II BCM5716 1000Base-T" },
1caacecb 120 { "Broadcom NetXtreme II BCM5716 1000Base-SX" },
b6016b76
MC
121 };
122
7bb0a04f 123static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
b6016b76
MC
124 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
125 PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
126 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
127 PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
128 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
129 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
5b0c76ad
MC
130 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
131 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
b6016b76
MC
132 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
133 PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
134 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
135 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
5b0c76ad
MC
136 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
137 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
bac0dff6
MC
138 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
139 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
27a005b8
MC
140 { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
141 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
7bb0a04f
MC
142 { PCI_VENDOR_ID_BROADCOM, 0x163b,
143 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
1caacecb 144 { PCI_VENDOR_ID_BROADCOM, 0x163c,
1f2435e5 145 PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
b6016b76
MC
146 { 0, }
147};
148
0ced9d01 149static const struct flash_spec flash_table[] =
b6016b76 150{
e30372c9
MC
151#define BUFFERED_FLAGS (BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
152#define NONBUFFERED_FLAGS (BNX2_NV_WREN)
b6016b76 153 /* Slow EEPROM */
37137709 154 {0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 155 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
b6016b76
MC
156 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
157 "EEPROM - slow"},
37137709
MC
158 /* Expansion entry 0001 */
159 {0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 160 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
161 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
162 "Entry 0001"},
b6016b76
MC
163 /* Saifun SA25F010 (non-buffered flash) */
164 /* strap, cfg1, & write1 need updates */
37137709 165 {0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 166 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
167 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
168 "Non-buffered flash (128kB)"},
169 /* Saifun SA25F020 (non-buffered flash) */
170 /* strap, cfg1, & write1 need updates */
37137709 171 {0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 172 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
b6016b76
MC
173 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
174 "Non-buffered flash (256kB)"},
37137709
MC
175 /* Expansion entry 0100 */
176 {0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 177 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
178 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
179 "Entry 0100"},
180 /* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
6aa20a22 181 {0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 182 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
183 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
184 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
185 /* Entry 0110: ST M45PE20 (non-buffered flash)*/
186 {0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
e30372c9 187 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
37137709
MC
188 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
189 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
190 /* Saifun SA25F005 (non-buffered flash) */
191 /* strap, cfg1, & write1 need updates */
192 {0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 193 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
194 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
195 "Non-buffered flash (64kB)"},
196 /* Fast EEPROM */
197 {0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
e30372c9 198 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
37137709
MC
199 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
200 "EEPROM - fast"},
201 /* Expansion entry 1001 */
202 {0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 203 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
204 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
205 "Entry 1001"},
206 /* Expansion entry 1010 */
207 {0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 208 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
209 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
210 "Entry 1010"},
211 /* ATMEL AT45DB011B (buffered flash) */
212 {0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 213 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
214 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
215 "Buffered flash (128kB)"},
216 /* Expansion entry 1100 */
217 {0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 218 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
219 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
220 "Entry 1100"},
221 /* Expansion entry 1101 */
222 {0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
e30372c9 223 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
37137709
MC
224 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
225 "Entry 1101"},
226 /* Ateml Expansion entry 1110 */
227 {0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 228 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
229 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
230 "Entry 1110 (Atmel)"},
231 /* ATMEL AT45DB021B (buffered flash) */
232 {0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
e30372c9 233 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
37137709
MC
234 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
235 "Buffered flash (256kB)"},
b6016b76
MC
236};
237
0ced9d01 238static const struct flash_spec flash_5709 = {
e30372c9
MC
239 .flags = BNX2_NV_BUFFERED,
240 .page_bits = BCM5709_FLASH_PAGE_BITS,
241 .page_size = BCM5709_FLASH_PAGE_SIZE,
242 .addr_mask = BCM5709_FLASH_BYTE_ADDR_MASK,
243 .total_size = BUFFERED_FLASH_TOTAL_SIZE*2,
244 .name = "5709 Buffered flash (256kB)",
245};
246
b6016b76
MC
247MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
248
4327ba43
BL
249static void bnx2_init_napi(struct bnx2 *bp);
250
35e9010b 251static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
e89bbf10 252{
2f8af120 253 u32 diff;
e89bbf10 254
2f8af120 255 smp_mb();
faac9c4b
MC
256
257 /* The ring uses 256 indices for 255 entries, one of them
258 * needs to be skipped.
259 */
35e9010b 260 diff = txr->tx_prod - txr->tx_cons;
faac9c4b
MC
261 if (unlikely(diff >= TX_DESC_CNT)) {
262 diff &= 0xffff;
263 if (diff == TX_DESC_CNT)
264 diff = MAX_TX_DESC_CNT;
265 }
e89bbf10
MC
266 return (bp->tx_ring_size - diff);
267}
268
b6016b76
MC
269static u32
270bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
271{
1b8227c4
MC
272 u32 val;
273
274 spin_lock_bh(&bp->indirect_lock);
b6016b76 275 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
1b8227c4
MC
276 val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
277 spin_unlock_bh(&bp->indirect_lock);
278 return val;
b6016b76
MC
279}
280
281static void
282bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
283{
1b8227c4 284 spin_lock_bh(&bp->indirect_lock);
b6016b76
MC
285 REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
286 REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
1b8227c4 287 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
288}
289
2726d6e1
MC
290static void
291bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
292{
293 bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
294}
295
296static u32
297bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
298{
299 return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
300}
301
b6016b76
MC
302static void
303bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
304{
305 offset += cid_addr;
1b8227c4 306 spin_lock_bh(&bp->indirect_lock);
59b47d8a
MC
307 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
308 int i;
309
310 REG_WR(bp, BNX2_CTX_CTX_DATA, val);
311 REG_WR(bp, BNX2_CTX_CTX_CTRL,
312 offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
313 for (i = 0; i < 5; i++) {
59b47d8a
MC
314 val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
315 if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
316 break;
317 udelay(5);
318 }
319 } else {
320 REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
321 REG_WR(bp, BNX2_CTX_DATA, val);
322 }
1b8227c4 323 spin_unlock_bh(&bp->indirect_lock);
b6016b76
MC
324}
325
4edd473f
MC
326#ifdef BCM_CNIC
327static int
328bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
329{
330 struct bnx2 *bp = netdev_priv(dev);
331 struct drv_ctl_io *io = &info->data.io;
332
333 switch (info->cmd) {
334 case DRV_CTL_IO_WR_CMD:
335 bnx2_reg_wr_ind(bp, io->offset, io->data);
336 break;
337 case DRV_CTL_IO_RD_CMD:
338 io->data = bnx2_reg_rd_ind(bp, io->offset);
339 break;
340 case DRV_CTL_CTX_WR_CMD:
341 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
342 break;
343 default:
344 return -EINVAL;
345 }
346 return 0;
347}
348
349static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
350{
351 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
352 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
353 int sb_id;
354
355 if (bp->flags & BNX2_FLAG_USING_MSIX) {
356 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
357 bnapi->cnic_present = 0;
358 sb_id = bp->irq_nvecs;
359 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
360 } else {
361 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
362 bnapi->cnic_tag = bnapi->last_status_idx;
363 bnapi->cnic_present = 1;
364 sb_id = 0;
365 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
366 }
367
368 cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
369 cp->irq_arr[0].status_blk = (void *)
370 ((unsigned long) bnapi->status_blk.msi +
371 (BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
372 cp->irq_arr[0].status_blk_num = sb_id;
373 cp->num_irq = 1;
374}
375
376static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
377 void *data)
378{
379 struct bnx2 *bp = netdev_priv(dev);
380 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
381
382 if (ops == NULL)
383 return -EINVAL;
384
385 if (cp->drv_state & CNIC_DRV_STATE_REGD)
386 return -EBUSY;
387
388 bp->cnic_data = data;
389 rcu_assign_pointer(bp->cnic_ops, ops);
390
391 cp->num_irq = 0;
392 cp->drv_state = CNIC_DRV_STATE_REGD;
393
394 bnx2_setup_cnic_irq_info(bp);
395
396 return 0;
397}
398
399static int bnx2_unregister_cnic(struct net_device *dev)
400{
401 struct bnx2 *bp = netdev_priv(dev);
402 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
403 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
404
c5a88950 405 mutex_lock(&bp->cnic_lock);
4edd473f
MC
406 cp->drv_state = 0;
407 bnapi->cnic_present = 0;
408 rcu_assign_pointer(bp->cnic_ops, NULL);
c5a88950 409 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
410 synchronize_rcu();
411 return 0;
412}
413
414struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
415{
416 struct bnx2 *bp = netdev_priv(dev);
417 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
418
419 cp->drv_owner = THIS_MODULE;
420 cp->chip_id = bp->chip_id;
421 cp->pdev = bp->pdev;
422 cp->io_base = bp->regview;
423 cp->drv_ctl = bnx2_drv_ctl;
424 cp->drv_register_cnic = bnx2_register_cnic;
425 cp->drv_unregister_cnic = bnx2_unregister_cnic;
426
427 return cp;
428}
429EXPORT_SYMBOL(bnx2_cnic_probe);
430
431static void
432bnx2_cnic_stop(struct bnx2 *bp)
433{
434 struct cnic_ops *c_ops;
435 struct cnic_ctl_info info;
436
c5a88950
MC
437 mutex_lock(&bp->cnic_lock);
438 c_ops = bp->cnic_ops;
4edd473f
MC
439 if (c_ops) {
440 info.cmd = CNIC_CTL_STOP_CMD;
441 c_ops->cnic_ctl(bp->cnic_data, &info);
442 }
c5a88950 443 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
444}
445
446static void
447bnx2_cnic_start(struct bnx2 *bp)
448{
449 struct cnic_ops *c_ops;
450 struct cnic_ctl_info info;
451
c5a88950
MC
452 mutex_lock(&bp->cnic_lock);
453 c_ops = bp->cnic_ops;
4edd473f
MC
454 if (c_ops) {
455 if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
456 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
457
458 bnapi->cnic_tag = bnapi->last_status_idx;
459 }
460 info.cmd = CNIC_CTL_START_CMD;
461 c_ops->cnic_ctl(bp->cnic_data, &info);
462 }
c5a88950 463 mutex_unlock(&bp->cnic_lock);
4edd473f
MC
464}
465
466#else
467
468static void
469bnx2_cnic_stop(struct bnx2 *bp)
470{
471}
472
473static void
474bnx2_cnic_start(struct bnx2 *bp)
475{
476}
477
478#endif
479
b6016b76
MC
480static int
481bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
482{
483 u32 val1;
484 int i, ret;
485
583c28e5 486 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
487 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
488 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
489
490 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
491 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
492
493 udelay(40);
494 }
495
496 val1 = (bp->phy_addr << 21) | (reg << 16) |
497 BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
498 BNX2_EMAC_MDIO_COMM_START_BUSY;
499 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
500
501 for (i = 0; i < 50; i++) {
502 udelay(10);
503
504 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
505 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
506 udelay(5);
507
508 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
509 val1 &= BNX2_EMAC_MDIO_COMM_DATA;
510
511 break;
512 }
513 }
514
515 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
516 *val = 0x0;
517 ret = -EBUSY;
518 }
519 else {
520 *val = val1;
521 ret = 0;
522 }
523
583c28e5 524 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
525 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
526 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
527
528 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
529 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
530
531 udelay(40);
532 }
533
534 return ret;
535}
536
537static int
538bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
539{
540 u32 val1;
541 int i, ret;
542
583c28e5 543 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
544 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
545 val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;
546
547 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
548 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
549
550 udelay(40);
551 }
552
553 val1 = (bp->phy_addr << 21) | (reg << 16) | val |
554 BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
555 BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
556 REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);
6aa20a22 557
b6016b76
MC
558 for (i = 0; i < 50; i++) {
559 udelay(10);
560
561 val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
562 if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
563 udelay(5);
564 break;
565 }
566 }
567
568 if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
569 ret = -EBUSY;
570 else
571 ret = 0;
572
583c28e5 573 if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
b6016b76
MC
574 val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
575 val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;
576
577 REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
578 REG_RD(bp, BNX2_EMAC_MDIO_MODE);
579
580 udelay(40);
581 }
582
583 return ret;
584}
585
586static void
587bnx2_disable_int(struct bnx2 *bp)
588{
b4b36042
MC
589 int i;
590 struct bnx2_napi *bnapi;
591
592 for (i = 0; i < bp->irq_nvecs; i++) {
593 bnapi = &bp->bnx2_napi[i];
594 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
595 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
596 }
b6016b76
MC
597 REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
598}
599
600static void
601bnx2_enable_int(struct bnx2 *bp)
602{
b4b36042
MC
603 int i;
604 struct bnx2_napi *bnapi;
35efa7c1 605
b4b36042
MC
606 for (i = 0; i < bp->irq_nvecs; i++) {
607 bnapi = &bp->bnx2_napi[i];
1269a8a6 608
b4b36042
MC
609 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
610 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
611 BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
612 bnapi->last_status_idx);
b6016b76 613
b4b36042
MC
614 REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
615 BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
616 bnapi->last_status_idx);
617 }
bf5295bb 618 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
619}
620
621static void
622bnx2_disable_int_sync(struct bnx2 *bp)
623{
b4b36042
MC
624 int i;
625
b6016b76 626 atomic_inc(&bp->intr_sem);
3767546c
MC
627 if (!netif_running(bp->dev))
628 return;
629
b6016b76 630 bnx2_disable_int(bp);
b4b36042
MC
631 for (i = 0; i < bp->irq_nvecs; i++)
632 synchronize_irq(bp->irq_tbl[i].vector);
b6016b76
MC
633}
634
35efa7c1
MC
635static void
636bnx2_napi_disable(struct bnx2 *bp)
637{
b4b36042
MC
638 int i;
639
640 for (i = 0; i < bp->irq_nvecs; i++)
641 napi_disable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
642}
643
644static void
645bnx2_napi_enable(struct bnx2 *bp)
646{
b4b36042
MC
647 int i;
648
649 for (i = 0; i < bp->irq_nvecs; i++)
650 napi_enable(&bp->bnx2_napi[i].napi);
35efa7c1
MC
651}
652
b6016b76 653static void
212f9934 654bnx2_netif_stop(struct bnx2 *bp, bool stop_cnic)
b6016b76 655{
212f9934
MC
656 if (stop_cnic)
657 bnx2_cnic_stop(bp);
b6016b76 658 if (netif_running(bp->dev)) {
e6bf95ff
BL
659 int i;
660
35efa7c1 661 bnx2_napi_disable(bp);
b6016b76 662 netif_tx_disable(bp->dev);
e6bf95ff
BL
663 /* prevent tx timeout */
664 for (i = 0; i < bp->dev->num_tx_queues; i++) {
665 struct netdev_queue *txq;
666
667 txq = netdev_get_tx_queue(bp->dev, i);
668 txq->trans_start = jiffies;
669 }
b6016b76 670 }
b7466560 671 bnx2_disable_int_sync(bp);
b6016b76
MC
672}
673
674static void
212f9934 675bnx2_netif_start(struct bnx2 *bp, bool start_cnic)
b6016b76
MC
676{
677 if (atomic_dec_and_test(&bp->intr_sem)) {
678 if (netif_running(bp->dev)) {
706bf240 679 netif_tx_wake_all_queues(bp->dev);
35efa7c1 680 bnx2_napi_enable(bp);
b6016b76 681 bnx2_enable_int(bp);
212f9934
MC
682 if (start_cnic)
683 bnx2_cnic_start(bp);
b6016b76
MC
684 }
685 }
686}
687
35e9010b
MC
688static void
689bnx2_free_tx_mem(struct bnx2 *bp)
690{
691 int i;
692
693 for (i = 0; i < bp->num_tx_rings; i++) {
694 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
695 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
696
697 if (txr->tx_desc_ring) {
698 pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
699 txr->tx_desc_ring,
700 txr->tx_desc_mapping);
701 txr->tx_desc_ring = NULL;
702 }
703 kfree(txr->tx_buf_ring);
704 txr->tx_buf_ring = NULL;
705 }
706}
707
bb4f98ab
MC
708static void
709bnx2_free_rx_mem(struct bnx2 *bp)
710{
711 int i;
712
713 for (i = 0; i < bp->num_rx_rings; i++) {
714 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
715 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
716 int j;
717
718 for (j = 0; j < bp->rx_max_ring; j++) {
719 if (rxr->rx_desc_ring[j])
720 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
721 rxr->rx_desc_ring[j],
722 rxr->rx_desc_mapping[j]);
723 rxr->rx_desc_ring[j] = NULL;
724 }
25b0b999 725 vfree(rxr->rx_buf_ring);
bb4f98ab
MC
726 rxr->rx_buf_ring = NULL;
727
728 for (j = 0; j < bp->rx_max_pg_ring; j++) {
729 if (rxr->rx_pg_desc_ring[j])
730 pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
3298a738
MC
731 rxr->rx_pg_desc_ring[j],
732 rxr->rx_pg_desc_mapping[j]);
733 rxr->rx_pg_desc_ring[j] = NULL;
bb4f98ab 734 }
25b0b999 735 vfree(rxr->rx_pg_ring);
bb4f98ab
MC
736 rxr->rx_pg_ring = NULL;
737 }
738}
739
35e9010b
MC
740static int
741bnx2_alloc_tx_mem(struct bnx2 *bp)
742{
743 int i;
744
745 for (i = 0; i < bp->num_tx_rings; i++) {
746 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
747 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
748
749 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
750 if (txr->tx_buf_ring == NULL)
751 return -ENOMEM;
752
753 txr->tx_desc_ring =
754 pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
755 &txr->tx_desc_mapping);
756 if (txr->tx_desc_ring == NULL)
757 return -ENOMEM;
758 }
759 return 0;
760}
761
bb4f98ab
MC
762static int
763bnx2_alloc_rx_mem(struct bnx2 *bp)
764{
765 int i;
766
767 for (i = 0; i < bp->num_rx_rings; i++) {
768 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
769 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
770 int j;
771
772 rxr->rx_buf_ring =
773 vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
774 if (rxr->rx_buf_ring == NULL)
775 return -ENOMEM;
776
777 memset(rxr->rx_buf_ring, 0,
778 SW_RXBD_RING_SIZE * bp->rx_max_ring);
779
780 for (j = 0; j < bp->rx_max_ring; j++) {
781 rxr->rx_desc_ring[j] =
782 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
783 &rxr->rx_desc_mapping[j]);
784 if (rxr->rx_desc_ring[j] == NULL)
785 return -ENOMEM;
786
787 }
788
789 if (bp->rx_pg_ring_size) {
790 rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
791 bp->rx_max_pg_ring);
792 if (rxr->rx_pg_ring == NULL)
793 return -ENOMEM;
794
795 memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
796 bp->rx_max_pg_ring);
797 }
798
799 for (j = 0; j < bp->rx_max_pg_ring; j++) {
800 rxr->rx_pg_desc_ring[j] =
801 pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
802 &rxr->rx_pg_desc_mapping[j]);
803 if (rxr->rx_pg_desc_ring[j] == NULL)
804 return -ENOMEM;
805
806 }
807 }
808 return 0;
809}
810
b6016b76
MC
811static void
812bnx2_free_mem(struct bnx2 *bp)
813{
13daffa2 814 int i;
43e80b89 815 struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
13daffa2 816
35e9010b 817 bnx2_free_tx_mem(bp);
bb4f98ab 818 bnx2_free_rx_mem(bp);
35e9010b 819
59b47d8a
MC
820 for (i = 0; i < bp->ctx_pages; i++) {
821 if (bp->ctx_blk[i]) {
822 pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
823 bp->ctx_blk[i],
824 bp->ctx_blk_mapping[i]);
825 bp->ctx_blk[i] = NULL;
826 }
827 }
43e80b89 828 if (bnapi->status_blk.msi) {
0f31f994 829 pci_free_consistent(bp->pdev, bp->status_stats_size,
43e80b89
MC
830 bnapi->status_blk.msi,
831 bp->status_blk_mapping);
832 bnapi->status_blk.msi = NULL;
0f31f994 833 bp->stats_blk = NULL;
b6016b76 834 }
b6016b76
MC
835}
836
837static int
838bnx2_alloc_mem(struct bnx2 *bp)
839{
35e9010b 840 int i, status_blk_size, err;
43e80b89
MC
841 struct bnx2_napi *bnapi;
842 void *status_blk;
b6016b76 843
0f31f994
MC
844 /* Combine status and statistics blocks into one allocation. */
845 status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
f86e82fb 846 if (bp->flags & BNX2_FLAG_MSIX_CAP)
b4b36042
MC
847 status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
848 BNX2_SBLK_MSIX_ALIGN_SIZE);
0f31f994
MC
849 bp->status_stats_size = status_blk_size +
850 sizeof(struct statistics_block);
851
43e80b89
MC
852 status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
853 &bp->status_blk_mapping);
854 if (status_blk == NULL)
b6016b76
MC
855 goto alloc_mem_err;
856
43e80b89 857 memset(status_blk, 0, bp->status_stats_size);
b6016b76 858
43e80b89
MC
859 bnapi = &bp->bnx2_napi[0];
860 bnapi->status_blk.msi = status_blk;
861 bnapi->hw_tx_cons_ptr =
862 &bnapi->status_blk.msi->status_tx_quick_consumer_index0;
863 bnapi->hw_rx_cons_ptr =
864 &bnapi->status_blk.msi->status_rx_quick_consumer_index0;
f86e82fb 865 if (bp->flags & BNX2_FLAG_MSIX_CAP) {
b4b36042 866 for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
43e80b89
MC
867 struct status_block_msix *sblk;
868
869 bnapi = &bp->bnx2_napi[i];
b4b36042 870
43e80b89
MC
871 sblk = (void *) (status_blk +
872 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
873 bnapi->status_blk.msix = sblk;
874 bnapi->hw_tx_cons_ptr =
875 &sblk->status_tx_quick_consumer_index;
876 bnapi->hw_rx_cons_ptr =
877 &sblk->status_rx_quick_consumer_index;
b4b36042
MC
878 bnapi->int_num = i << 24;
879 }
880 }
35efa7c1 881
43e80b89 882 bp->stats_blk = status_blk + status_blk_size;
b6016b76 883
0f31f994 884 bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;
b6016b76 885
59b47d8a
MC
886 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
887 bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
888 if (bp->ctx_pages == 0)
889 bp->ctx_pages = 1;
890 for (i = 0; i < bp->ctx_pages; i++) {
891 bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
892 BCM_PAGE_SIZE,
893 &bp->ctx_blk_mapping[i]);
894 if (bp->ctx_blk[i] == NULL)
895 goto alloc_mem_err;
896 }
897 }
35e9010b 898
bb4f98ab
MC
899 err = bnx2_alloc_rx_mem(bp);
900 if (err)
901 goto alloc_mem_err;
902
35e9010b
MC
903 err = bnx2_alloc_tx_mem(bp);
904 if (err)
905 goto alloc_mem_err;
906
b6016b76
MC
907 return 0;
908
909alloc_mem_err:
910 bnx2_free_mem(bp);
911 return -ENOMEM;
912}
913
e3648b3d
MC
914static void
915bnx2_report_fw_link(struct bnx2 *bp)
916{
917 u32 fw_link_status = 0;
918
583c28e5 919 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
0d8a6571
MC
920 return;
921
e3648b3d
MC
922 if (bp->link_up) {
923 u32 bmsr;
924
925 switch (bp->line_speed) {
926 case SPEED_10:
927 if (bp->duplex == DUPLEX_HALF)
928 fw_link_status = BNX2_LINK_STATUS_10HALF;
929 else
930 fw_link_status = BNX2_LINK_STATUS_10FULL;
931 break;
932 case SPEED_100:
933 if (bp->duplex == DUPLEX_HALF)
934 fw_link_status = BNX2_LINK_STATUS_100HALF;
935 else
936 fw_link_status = BNX2_LINK_STATUS_100FULL;
937 break;
938 case SPEED_1000:
939 if (bp->duplex == DUPLEX_HALF)
940 fw_link_status = BNX2_LINK_STATUS_1000HALF;
941 else
942 fw_link_status = BNX2_LINK_STATUS_1000FULL;
943 break;
944 case SPEED_2500:
945 if (bp->duplex == DUPLEX_HALF)
946 fw_link_status = BNX2_LINK_STATUS_2500HALF;
947 else
948 fw_link_status = BNX2_LINK_STATUS_2500FULL;
949 break;
950 }
951
952 fw_link_status |= BNX2_LINK_STATUS_LINK_UP;
953
954 if (bp->autoneg) {
955 fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;
956
ca58c3af
MC
957 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
958 bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
e3648b3d
MC
959
960 if (!(bmsr & BMSR_ANEGCOMPLETE) ||
583c28e5 961 bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
e3648b3d
MC
962 fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
963 else
964 fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
965 }
966 }
967 else
968 fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;
969
2726d6e1 970 bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
e3648b3d
MC
971}
972
9b1084b8
MC
973static char *
974bnx2_xceiver_str(struct bnx2 *bp)
975{
976 return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
583c28e5 977 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
9b1084b8
MC
978 "Copper"));
979}
980
b6016b76
MC
981static void
982bnx2_report_link(struct bnx2 *bp)
983{
984 if (bp->link_up) {
985 netif_carrier_on(bp->dev);
3a9c6a49
JP
986 netdev_info(bp->dev, "NIC %s Link is Up, %d Mbps %s duplex",
987 bnx2_xceiver_str(bp),
988 bp->line_speed,
989 bp->duplex == DUPLEX_FULL ? "full" : "half");
b6016b76
MC
990
991 if (bp->flow_ctrl) {
992 if (bp->flow_ctrl & FLOW_CTRL_RX) {
3a9c6a49 993 pr_cont(", receive ");
b6016b76 994 if (bp->flow_ctrl & FLOW_CTRL_TX)
3a9c6a49 995 pr_cont("& transmit ");
b6016b76
MC
996 }
997 else {
3a9c6a49 998 pr_cont(", transmit ");
b6016b76 999 }
3a9c6a49 1000 pr_cont("flow control ON");
b6016b76 1001 }
3a9c6a49
JP
1002 pr_cont("\n");
1003 } else {
b6016b76 1004 netif_carrier_off(bp->dev);
3a9c6a49
JP
1005 netdev_err(bp->dev, "NIC %s Link is Down\n",
1006 bnx2_xceiver_str(bp));
b6016b76 1007 }
e3648b3d
MC
1008
1009 bnx2_report_fw_link(bp);
b6016b76
MC
1010}
1011
/* Determine the effective flow-control mode (bp->flow_ctrl) after a link
 * change.  If flow control or speed was forced, the requested setting is
 * used directly; otherwise the result is resolved from the local and
 * link-partner pause advertisements per IEEE 802.3ab Table 28B-3.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
	    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		/* Forced configuration: pause is only valid at full duplex. */
		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the resolved pause state directly in a
	 * status register, so no advertisement comparison is needed.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Translate 1000BASE-X pause bits into the common (copper)
	 * encoding so one resolution table below handles both media.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
		    (remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1087
27a005b8
MC
/* Read negotiated speed/duplex from the 5709 SerDes PHY after link-up.
 * The GP status register lives in a separate register block, so the
 * block address is switched before the read and restored afterwards.
 * Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report what was requested, not what the PHY says. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1126
/* Read negotiated speed/duplex from the 5708 SerDes PHY status register
 * after link-up.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1155
/* Determine duplex for the 5706 SerDes PHY after link-up.  The 5706
 * SerDes only runs at 1 Gbps; duplex comes from BMCR when forced, or
 * from the common advertisement bits when autonegotiated.
 * Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex above is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1192
/* Determine speed/duplex for a copper PHY after link-up.  With autoneg
 * enabled, the highest common capability wins (1000 first, then 100/10);
 * otherwise the forced settings are read back from BMCR.
 * Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* STAT1000 link-partner bits are offset by 2 from the
		 * CTRL1000 advertisement bits, hence the shift.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common capability: treat as link down. */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1258
/* Program the context-memory type word for one RX ring context.  On the
 * 5709 this also sets the flow-control watermarks, scaled to the ring
 * size and disabled when TX pause is off.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = min_t(int, bp->rx_ring_size / 4, lo_water + 16);

		if (hi_water <= lo_water)
			lo_water = 0;

		/* Watermarks are stored in hardware units, not BDs. */
		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1294
bb4f98ab
MC
/* Initialize the context for every RX ring.  Ring 0 uses RX_CID; the
 * RSS rings start at RX_RSS_CID and are numbered consecutively from
 * there, hence the cid rebase when i == 1.
 */
static void
bnx2_init_all_rx_contexts(struct bnx2 *bp)
{
	int i;
	u32 cid;

	for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
		if (i == 1)
			cid = RX_RSS_CID;
		bnx2_init_rx_context(bp, cid);
	}
}
1307
/* Program the EMAC to match the resolved link parameters: port mode
 * (MII/GMII/2.5G), duplex, inter-frame gap, and RX/TX pause enables.
 * Must be called after bp->line_speed/duplex/flow_ctrl are settled.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* 1000 Mbps half duplex needs a larger slot time value. */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 RX watermarks depend on the TX pause setting, so the RX
	 * contexts must be reprogrammed whenever flow control changes.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1375
27a005b8
MC
/* On 5709 SerDes, the status register used as "BMSR" lives in the GP
 * status block; switch to that block before reading bp->mii_bmsr1.
 * No-op on other chips.
 */
static void
bnx2_enable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_GP_STATUS);
}
1384
/* Undo bnx2_enable_bmsr1(): restore the default combo IEEE block on
 * 5709 SerDes after the status read.  No-op on other chips.
 */
static void
bnx2_disable_bmsr1(struct bnx2 *bp)
{
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5709))
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
}
1393
605a9e20
MC
/* Enable 2.5G advertisement in the PHY's UP1 register if not already
 * set.  Returns 1 if it was already enabled (or the PHY is not 2.5G
 * capable), 0 if this call had to turn it on (caller may need to
 * restart autoneg).  On 5709 the UP1 register is in the OVER1G block.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1422
/* Disable 2.5G advertisement in the PHY's UP1 register if currently set.
 * Returns 1 if this call had to turn it off (caller may need to restart
 * autoneg), 0 otherwise.  On 5709 the UP1 register is in the OVER1G block.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1448
/* Force the SerDes PHY to 2.5 Gbps.  The chip-specific force bits are
 * set first (SERDES_DIG block on 5709, BMCR bit on 5708), then BMCR is
 * rewritten with autoneg disabled when speed was autonegotiated.
 * No-op on PHYs without 2.5G capability or on other chips.
 */
static void
bnx2_enable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
		val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr |= BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED) {
		bmcr &= ~BMCR_ANENABLE;
		if (bp->req_duplex == DUPLEX_FULL)
			bmcr |= BMCR_FULLDPLX;
	}
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1485
/* Undo bnx2_enable_forced_2g5(): clear the chip-specific 2.5G force
 * bits and, if speed is autonegotiated, re-enable and restart autoneg.
 * No-op on PHYs without 2.5G capability or on other chips.
 */
static void
bnx2_disable_forced_2g5(struct bnx2 *bp)
{
	u32 bmcr;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_SERDES_DIG);
		bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
		val &= ~MII_BNX2_SD_MISC1_FORCE;
		bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);

		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		bmcr &= ~BCM5708S_BMCR_FORCE_2500;
	} else {
		return;
	}

	if (bp->autoneg & AUTONEG_SPEED)
		bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
}
1518
b2fadeae
MC
/* Force the 5706 SerDes link down (start != 0) or release the force
 * (start == 0) by toggling bits in the expansion SERDES control
 * register via the DSP access registers.
 */
static void
bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
{
	u32 val;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
	if (start)
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
	else
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
}
1531
b6016b76
MC
/* Poll the PHY, update bp->link_up / speed / duplex / flow control, and
 * reprogram the MAC.  Called with bp->phy_lock held.  Skipped entirely
 * in loopback modes and when the link is managed by the remote
 * (firmware-controlled) PHY.  Always returns 0.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR latches link-down; read twice to get current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes workaround: derive link state from the EMAC status
	 * and the autoneg debug shadow register instead of trusting BMSR.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Link went down after a parallel detect; re-enable
		 * autoneg so the next partner can negotiate normally.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1615
/* Issue a PHY soft reset via BMCR and poll (up to ~1 ms) for the
 * self-clearing reset bit to drop.  Returns 0 on success, -EBUSY if the
 * PHY never comes out of reset.
 */
static int
bnx2_reset_phy(struct bnx2 *bp)
{
	int i;
	u32 reg;

	bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);

#define PHY_RESET_MAX_WAIT 100
	for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
		udelay(10);

		bnx2_read_phy(bp, bp->mii_bmcr, &reg);
		if (!(reg & BMCR_RESET)) {
			udelay(20);
			break;
		}
	}
	if (i == PHY_RESET_MAX_WAIT) {
		return -EBUSY;
	}
	return 0;
}
1639
1640static u32
1641bnx2_phy_get_pause_adv(struct bnx2 *bp)
1642{
1643 u32 adv = 0;
1644
1645 if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1646 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1647
583c28e5 1648 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1649 adv = ADVERTISE_1000XPAUSE;
1650 }
1651 else {
1652 adv = ADVERTISE_PAUSE_CAP;
1653 }
1654 }
1655 else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
583c28e5 1656 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1657 adv = ADVERTISE_1000XPSE_ASYM;
1658 }
1659 else {
1660 adv = ADVERTISE_PAUSE_ASYM;
1661 }
1662 }
1663 else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
583c28e5 1664 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
b6016b76
MC
1665 adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1666 }
1667 else {
1668 adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1669 }
1670 }
1671 return adv;
1672}
1673
a2f13890 1674static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
0d8a6571 1675
/* Configure link settings through the firmware-managed (remote) PHY:
 * encode speed/duplex/pause into a netlink-style argument word, write it
 * to shared memory, and issue the SET_LINK firmware command.  The phy
 * lock is dropped around the (sleeping) firmware handshake, as the
 * sparse annotations indicate.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() can sleep; release the spinlock around it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1734
/* Configure a SerDes PHY according to the requested link parameters.
 * Delegates to the firmware path for remote PHYs.  In forced-speed mode
 * the 2.5G/1G force bits and BMCR are programmed directly (bouncing the
 * link if anything changed so the partner notices); in autoneg mode the
 * advertisement is rewritten and autoneg restarted only when it differs
 * from the current value.  Called with (and may temporarily drop)
 * bp->phy_lock.  Always returns 0.
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		u32 new_bmcr;
		int force_link_down = 0;

		/* Switching between 1G and 2.5G force modes requires a
		 * link bounce even if BMCR ends up unchanged.
		 */
		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1851
/* Advertisement bit masks used by the ethtool and PHY setup paths.
 * Note: ETHTOOL_ALL_FIBRE_SPEED reads "bp" from the enclosing scope.
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | \
	ADVERTISED_1000baseT_Full)

#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1866
0d8a6571
MC
/* Initialize autoneg/speed/duplex defaults from the firmware's saved
 * link configuration in shared memory (remote-PHY mode).  The saved
 * word is read from the copper or serdes slot depending on phy_port.
 */
static void
bnx2_set_default_remote_link(struct bnx2 *bp)
{
	u32 link;

	if (bp->phy_port == PORT_TP)
		link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
	else
		link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);

	if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
		bp->req_line_speed = 0;
		bp->autoneg |= AUTONEG_SPEED;
		bp->advertising = ADVERTISED_Autoneg;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
			bp->advertising |= ADVERTISED_10baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
			bp->advertising |= ADVERTISED_10baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
			bp->advertising |= ADVERTISED_100baseT_Half;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
			bp->advertising |= ADVERTISED_100baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->advertising |= ADVERTISED_1000baseT_Full;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->advertising |= ADVERTISED_2500baseX_Full;
	} else {
		/* Forced mode; later checks override earlier ones so the
		 * highest configured speed wins.
		 */
		bp->autoneg = 0;
		bp->advertising = 0;
		bp->req_duplex = DUPLEX_FULL;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
			bp->req_line_speed = SPEED_10;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
			bp->req_line_speed = SPEED_100;
			if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
				bp->req_duplex = DUPLEX_HALF;
		}
		if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
			bp->req_line_speed = SPEED_1000;
		if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
			bp->req_line_speed = SPEED_2500;
	}
}
1913
deaf391b
MC
/* Initialize the default link configuration: firmware-saved settings in
 * remote-PHY mode, otherwise full autoneg -- unless the NVRAM port
 * config forces 1G on a SerDes port.
 */
static void
bnx2_set_default_link(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		bnx2_set_default_remote_link(bp);
		return;
	}

	bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
	bp->req_line_speed = 0;
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 reg;

		bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;

		reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
		reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
		if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
			bp->autoneg = 0;
			bp->req_line_speed = bp->line_speed = SPEED_1000;
			bp->req_duplex = DUPLEX_FULL;
		}
	} else
		bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
}
1939
df149d70
MC
/* Send a driver-alive pulse to the bootcode by bumping the pulse
 * sequence number in shared memory.  Uses the register window directly
 * under indirect_lock (instead of bnx2_shmem_wr) so it is safe from the
 * contexts that call it.
 */
static void
bnx2_send_heart_beat(struct bnx2 *bp)
{
	u32 msg;
	u32 addr;

	spin_lock(&bp->indirect_lock);
	msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
	addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
	spin_unlock(&bp->indirect_lock);
}
1953
0d8a6571
MC
/* Handle a link event reported by the firmware-managed (remote) PHY:
 * decode the link-status word from shared memory into bp->link_up,
 * speed, duplex, flow control, and media type, then update the MAC and
 * report any link change.  The half-duplex switch cases intentionally
 * fall through to the matching speed case.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* Media type can change on dual-media ports; reload the
		 * defaults when it does.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2030
/* Dispatch a firmware event in remote-PHY mode.  Link events are
 * decoded; timer-expiration and unknown events just answer with a
 * heartbeat pulse.  Always returns 0.
 */
static int
bnx2_set_remote_link(struct bnx2 *bp)
{
	u32 evt_code;

	evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
	switch (evt_code) {
		case BNX2_FW_EVT_CODE_LINK_EVENT:
			bnx2_remote_phy_event(bp);
			break;
		case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
		default:
			bnx2_send_heart_beat(bp);
			break;
	}
	return 0;
}
2048
b6016b76
MC
/* Configure a copper PHY according to the requested link parameters.
 * In autoneg mode the 10/100 and 1000 advertisements are rewritten and
 * autoneg restarted only when something changed; in forced mode BMCR is
 * programmed, bouncing the link via loopback when it was up so the
 * partner renegotiates.  Called with (and may temporarily drop)
 * bp->phy_lock.  Always returns 0.
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Only restart autoneg if the advertisement actually
		 * changed or autoneg was previously disabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR latches link-down; read twice for current state. */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2147
2148static int
0d8a6571 2149bnx2_setup_phy(struct bnx2 *bp, u8 port)
52d07b1f
HH
2150__releases(&bp->phy_lock)
2151__acquires(&bp->phy_lock)
b6016b76
MC
2152{
2153 if (bp->loopback == MAC_LOOPBACK)
2154 return 0;
2155
583c28e5 2156 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 2157 return (bnx2_setup_serdes_phy(bp, port));
b6016b76
MC
2158 }
2159 else {
2160 return (bnx2_setup_copper_phy(bp));
2161 }
2162}
2163
/* Initialize the 5709 SerDes PHY.  The 5709S uses shadowed MII registers
 * (offset +0x10) and a block-address scheme; the write ordering below
 * follows the required hardware programming sequence.  Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* 5709S register addresses are offset from the standard MII set. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the AER block and map in the autoneg MMD. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode, disable auto-detect. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Enable/disable 2.5G advertisement per capability flag. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
		MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the combo IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2213
/* Initialize the 5708 SerDes PHY: fiber mode, PLL early-link detect,
 * optional 2.5G advertisement, and chip-rev / backplane specific TX
 * amplitude tuning read from shared memory config.  Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with auto-detect enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Board-specific TX control value from shared-memory config;
	 * only applied on backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2271
/* Initialize the 5706 SerDes PHY.  Programs extended packet length
 * (jumbo) support depending on the current MTU via raw shadow registers
 * 0x18/0x1c (vendor-specific, values from Broadcom).  Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Clear extended packet length bit for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2309
/* Initialize the copper PHY: apply the CRC-fix and early-DAC workaround
 * sequences when the corresponding quirk flags are set, configure
 * extended packet length for jumbo MTU, and enable ethernet@wirespeed.
 * The raw 0x10/0x15/0x17/0x18/0x1c register writes are vendor-specified
 * shadow-register sequences.  Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		/* Hardware workaround sequence (opaque vendor values). */
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		/* Clear the early-DAC bit in the DSP expansion register. */
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Clear extended packet length for standard MTU. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2361
2362
/* Common PHY initialization entry point.  Sets default MII register
 * addresses, reads the PHY ID, dispatches to the chip-specific SerDes
 * or copper init routine, then performs link setup.  Skips local PHY
 * access entirely when a remote (management firmware) PHY is in control.
 * Called with bp->phy_lock held; callees may drop/re-acquire it.
 * Returns 0 or a negative errno from the init/setup routines.
 */
static int
bnx2_init_phy(struct bnx2 *bp, int reset_phy)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 val;
	int rc = 0;

	bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
	bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;

	/* Default (IEEE) MII register map; chip-specific init may remap. */
	bp->mii_bmcr = MII_BMCR;
	bp->mii_bmsr = MII_BMSR;
	bp->mii_bmsr1 = MII_BMSR;
	bp->mii_adv = MII_ADVERTISE;
	bp->mii_lpa = MII_LPA;

	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		goto setup_phy;

	bnx2_read_phy(bp, MII_PHYSID1, &val);
	bp->phy_id = val << 16;
	bnx2_read_phy(bp, MII_PHYSID2, &val);
	bp->phy_id |= val & 0xffff;

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			rc = bnx2_init_5706s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5708)
			rc = bnx2_init_5708s_phy(bp, reset_phy);
		else if (CHIP_NUM(bp) == CHIP_NUM_5709)
			rc = bnx2_init_5709s_phy(bp, reset_phy);
	}
	else {
		rc = bnx2_init_copper_phy(bp, reset_phy);
	}

setup_phy:
	if (!rc)
		rc = bnx2_setup_phy(bp, bp->phy_port);

	return rc;
}
2408
2409static int
2410bnx2_set_mac_loopback(struct bnx2 *bp)
2411{
2412 u32 mac_mode;
2413
2414 mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2415 mac_mode &= ~BNX2_EMAC_MODE_PORT;
2416 mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2417 REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2418 bp->link_up = 1;
2419 return 0;
2420}
2421
static int bnx2_test_link(struct bnx2 *);

/* Put the PHY into loopback at 1000/full for the loopback self-test,
 * poll up to ~1s for the link to come up, then configure the MAC port
 * mode for GMII.  Returns 0 on success or the PHY write error.
 */
static int
bnx2_set_phy_loopback(struct bnx2 *bp)
{
	u32 mac_mode;
	int rc, i;

	spin_lock_bh(&bp->phy_lock);
	rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
			    BMCR_SPEED1000);
	spin_unlock_bh(&bp->phy_lock);
	if (rc)
		return rc;

	/* Wait for the loopback link to be reported up (10 x 100ms). */
	for (i = 0; i < 10; i++) {
		if (bnx2_test_link(bp) == 0)
			break;
		msleep(100);
	}

	mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
	mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		      BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		      BNX2_EMAC_MODE_25G_MODE);

	mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
	REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
	bp->link_up = 1;
	return 0;
}
2453
/* Send a message to the bootcode firmware through the shared-memory
 * driver mailbox and optionally wait for the acknowledgement.
 *
 * @msg_data: message code; a sequence number is OR'ed in here.
 * @ack:      if zero, post the message and return immediately.
 * @silent:   suppress the timeout error message.
 *
 * Returns 0 on success, -EBUSY on ack timeout, -EIO if the firmware
 * reported a non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages are fire-and-forget; don't treat as timeout. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			pr_err("fw sync timeout, reset code = %x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2498
/* Initialize the 5709 context memory: trigger the hardware memory init,
 * then register each host context page with the chip's page table,
 * polling for each write request to complete.
 * Returns 0, -EBUSY on hardware timeout, or -ENOMEM if a context page
 * was never allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the hardware to finish initializing context memory. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program host page address into the chip's page table. */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2546
/* Zero out the on-chip context memory for all 96 connection IDs
 * (pre-5709 chips).  On 5706 A0 some VCIDs are remapped to work around
 * a hardware erratum.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* A0 erratum: remap some VCIDs to alternate PCIDs. */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2589
/* Work around bad on-chip RX buffer memory: allocate every free mbuf
 * from the chip, keep the good ones (bit 9 clear) in a scratch array,
 * then free only the good ones back — permanently retiring the bad
 * blocks.  Returns 0 or -ENOMEM.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		pr_err("Failed to allocate memory in %s\n", __func__);
		return -ENOMEM;
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2640
2641static void
5fcaed01 2642bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
b6016b76
MC
2643{
2644 u32 val;
b6016b76
MC
2645
2646 val = (mac_addr[0] << 8) | mac_addr[1];
2647
5fcaed01 2648 REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
b6016b76 2649
6aa20a22 2650 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
b6016b76
MC
2651 (mac_addr[4] << 8) | mac_addr[5];
2652
5fcaed01 2653 REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
b6016b76
MC
2654}
2655
/* Allocate and DMA-map one page for slot @index of the RX page ring,
 * and publish its address in the corresponding buffer descriptor.
 * Returns 0, -ENOMEM on page allocation failure, -EIO on mapping error.
 */
static inline int
bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	dma_addr_t mapping;
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct rx_bd *rxbd =
		&rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
	struct page *page = alloc_page(GFP_ATOMIC);

	if (!page)
		return -ENOMEM;
	mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
			       PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		__free_page(page);
		return -EIO;
	}

	rx_pg->page = page;
	dma_unmap_addr_set(rx_pg, mapping, mapping);
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	return 0;
}
2680
/* Unmap and free the page (if any) held in slot @index of the RX page
 * ring, clearing the slot.
 */
static void
bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
	struct page *page = rx_pg->page;

	if (!page)
		return;

	pci_unmap_page(bp->pdev, dma_unmap_addr(rx_pg, mapping), PAGE_SIZE,
		       PCI_DMA_FROMDEVICE);

	__free_page(page);
	rx_pg->page = NULL;
}
2696
/* Allocate, align, and DMA-map a new receive skb for slot @index of
 * the RX ring; publish the mapping in the buffer descriptor and advance
 * the producer byte-sequence counter.
 * Returns 0, -ENOMEM on skb allocation failure, -EIO on mapping error.
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Align skb->data to the hardware's required RX alignment. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	/* The chip writes the l2_fhdr at the start of the buffer. */
	rx_buf->desc = (struct l2_fhdr *) skb->data;
	dma_unmap_addr_set(rx_buf, mapping, mapping);

	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2732
/* Check whether attention @event has toggled in the status block, and
 * acknowledge it to the hardware (set or clear command register) so the
 * ack bits track the attention bits.  Returns 1 if the event changed
 * state, 0 otherwise.
 */
static int
bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 new_link_state, old_link_state;
	int is_set = 1;

	new_link_state = sblk->status_attn_bits & event;
	old_link_state = sblk->status_attn_bits_ack & event;
	if (new_link_state != old_link_state) {
		/* Acknowledge: make the ack bit match the attention bit. */
		if (new_link_state)
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
		else
			REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
	} else
		is_set = 0;

	return is_set;
}
2752
/* Handle PHY-related attention events from interrupt/NAPI context:
 * local link-state changes and remote-PHY (timer abort) events.
 * Takes the non-BH phy_lock variant since we are already in softirq
 * context here.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2766
/* Read the hardware TX consumer index from the status block.  Skips
 * over the unused last entry of each ring page.  The barriers prevent
 * the compiler from caching the DMA-updated status block field.
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2780
/* Reclaim completed TX descriptors for this NAPI instance's TX ring:
 * unmap each completed skb (head plus page fragments), free it, and
 * wake the corresponding netdev TX queue if it was stopped and enough
 * ring space is now available.  Returns the number of packets reclaimed
 * (bounded by @budget).
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Don't free until every BD of the packet is done. */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			skb_headlen(skb), PCI_DMA_TODEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);

			pci_unmap_page(bp->pdev,
				dma_unmap_addr(
					&txr->tx_buf_ring[TX_RING_IDX(sw_cons)],
					mapping),
				skb_shinfo(skb)->frags[i].size,
				PCI_DMA_TODEVICE);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index to pick up new completions. */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
	    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the TX lock to avoid racing with xmit. */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2871
/* Recycle @count pages from the RX page-ring consumer side back to the
 * producer side without remapping them (allocation failed upstream).
 * If @skb is non-NULL, its last page fragment is first reclaimed into
 * the ring and the skb freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		if (prod != cons) {
			/* Move page, DMA mapping, and BD address together. */
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			dma_unmap_addr_set(prod_rx_pg, mapping,
					   dma_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2927
/* Recycle an RX skb from the consumer slot @cons back into the producer
 * slot @prod (used when a replacement buffer could not be allocated or
 * the packet was copied/errored).  Re-syncs the header area for the
 * device and, when the slots differ, moves the DMA mapping and BD
 * address from cons to prod.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Hand the (CPU-synced) header area back to the device. */
	pci_dma_sync_single_for_device(bp->pdev,
		dma_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;
	prod_rx_buf->desc = (struct l2_fhdr *) skb->data;

	if (cons == prod)
		return;

	dma_unmap_addr_set(prod_rx_buf, mapping,
			   dma_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2958
/* Finish receiving one packet into @skb.  Allocates a replacement skb
 * for the ring slot, unmaps the received buffer, and — for jumbo/split
 * packets (@hdr_len != 0) — attaches the remaining data as page
 * fragments from the page ring, replenishing each page as it goes.
 *
 * @len:      packet length excluding the 4-byte trailing CRC.
 *            (Internally the code works with len + 4 raw bytes.)
 * @hdr_len:  linear header length for split packets, 0 for linear ones.
 * @ring_idx: packed (cons << 16) | prod ring indices.
 *
 * On any allocation failure the buffers/pages are recycled back into
 * the rings and an error is returned; otherwise 0.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill the slot: recycle skb (and pages). */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Linear packet: all data already in the skb head. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Final fragment holds only (part of) the
				 * CRC: trim it and recycle remaining pages.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = dma_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* drop trailing CRC */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3057
/* Read the hardware RX consumer index from the status block.  Skips
 * over the unused last entry of each ring page.  The barriers prevent
 * the compiler from caching the DMA-updated status block field.
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3071
/* Service completed RX descriptors for one ring, up to @budget packets.
 * Runs in NAPI context (called from bnx2_poll_work).  Small frames are
 * copied into a fresh skb and the original buffer is recycled; larger
 * frames have their data buffer flipped up the stack.  Returns the
 * number of packets delivered; hardware ring indices are written back
 * once at the end.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;
	struct pci_dev *pdev = bp->pdev;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf, *next_rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;
		prefetchw(skb);

		/* Only prefetch the next descriptor when no per-device
		 * sync op is needed (coherent DMA); otherwise the data
		 * may not be valid yet.
		 */
		if (!get_dma_ops(&pdev->dev)->sync_single_for_cpu) {
			next_rx_buf =
				&rxr->rx_buf_ring[
					RX_RING_IDX(NEXT_RX_BD(sw_cons))];
			prefetch(next_rx_buf->desc);
		}
		rx_buf->skb = NULL;

		dma_addr = dma_unmap_addr(rx_buf, mapping);

		/* Sync only the l2_fhdr + copy-threshold region for the CPU;
		 * bnx2_rx_skb() handles the rest for large frames.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		rx_hdr = rx_buf->desc;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			/* Header/payload split: payload lives in the page
			 * ring; l2_fhdr_ip_xsum carries the header length
			 * in this mode.
			 */
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			/* Errored frame: recycle the buffer(s), drop. */
			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		len -= 4;	/* strip the 4-byte CRC */

		if (len <= bp->rx_copy_thresh) {
			struct sk_buff *new_skb;

			/* Small frame: copy into a fresh skb and recycle
			 * the DMA buffer in place.
			 */
			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN group registered: re-insert the
				 * stripped 802.1Q tag into the packet data
				 * by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they are VLAN-tagged
		 * (0x8100), which legitimately adds 4 bytes.
		 */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_gro_receive(&bnapi->napi, bp->vlgrp, vtag, skb);
		else
#endif
			napi_gro_receive(&bnapi->napi, skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Publish the new producer indices to the chip. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3255
/* MSI ISR - The only difference between this and the INTx ISR
 * is that the MSI interrupt is always serviced.
 */
static irqreturn_t
bnx2_msi(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);
	/* Mask further interrupts until NAPI polling completes. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3278
/* One-shot MSI ISR: the hardware disarms the interrupt by itself, so
 * unlike bnx2_msi() no explicit mask write is needed before scheduling
 * NAPI.
 */
static irqreturn_t
bnx2_msi_1shot(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;

	prefetch(bnapi->status_blk.msi);

	/* Return here if interrupt is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	napi_schedule(&bnapi->napi);

	return IRQ_HANDLED;
}
3295
/* INTx ISR.  Must detect whether the (possibly shared) interrupt line
 * was actually raised by this device before claiming it.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;	/* not ours (shared line) */

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index only if we actually win the right to
	 * schedule NAPI; otherwise a poll is already in flight.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3334
/* Return 1 if this NAPI instance has pending RX or TX completions
 * (hardware consumer index differs from our cached one), else 0.
 */
static inline int
bnx2_has_fast_work(struct bnx2_napi *bnapi)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
	    (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
		return 1;
	return 0;
}
3346
3347#define STATUS_ATTN_EVENTS (STATUS_ATTN_BITS_LINK_STATE | \
3348 STATUS_ATTN_BITS_TIMER_ABORT)
3349
/* Return 1 if anything needs servicing: RX/TX completions, pending
 * CNIC events (when built in), or unacknowledged attention events
 * (link state / timer abort), else 0.
 */
static inline int
bnx2_has_work(struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;

	if (bnx2_has_fast_work(bnapi))
		return 1;

#ifdef BCM_CNIC
	if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
		return 1;
#endif

	/* Attention bits differ from their ack copy => event pending. */
	if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
		return 1;

	return 0;
}
3369
/* Workaround for lost MSIs: if work has been pending across two checks
 * with no status-index progress, pulse the MSI enable bit off/on and
 * invoke the MSI handler by hand to kick NAPI.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* No progress since the last idle check => MSI missed. */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3391
#ifdef BCM_CNIC
/* Hand the status block to the CNIC (iSCSI offload) driver, if one has
 * registered.  cnic_ops is RCU-protected; the returned tag is compared
 * against status_idx in bnx2_has_work().
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
#endif
3408
/* Service pending attention events (link change / timer abort) by
 * calling the PHY interrupt handler, then force a coalesce-now cycle
 * so transient status during the link change is picked up.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		REG_RD(bp, BNX2_HC_COMMAND);	/* posted-write flush */
	}
}
3428
/* Do one round of TX reclaim and RX processing for this NAPI instance.
 * TX completion is unbudgeted (0); RX consumes the remaining budget.
 * Returns the updated work_done count.
 */
static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
			  int work_done, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
		bnx2_tx_int(bp, bnapi, 0);

	if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
		work_done += bnx2_rx_int(bp, bnapi, budget - work_done);

	return work_done;
}
3443
/* NAPI poll handler for the per-vector MSI-X rings (no link/attention
 * handling here — that belongs to vector 0's bnx2_poll()).  Loops until
 * the budget is exhausted or no fast work remains, then re-enables the
 * vector's interrupt via the INT_ACK register.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			napi_complete(napi);
			/* Ack with the latest index and unmask this vector. */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3470
/* Main NAPI poll handler (vector 0 / INTx / MSI).  Handles link events,
 * CNIC, and fast work; on completion re-enables interrupts, with an
 * extra masked ack write first in the INTx case.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				/* MSI/MSI-X: a single ack re-enables. */
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: first ack with the line still masked, then
			 * a second write to unmask.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3519
/* Called with rtnl_lock from vlan functions and also netif_tx_lock
 * from set_multicast.
 *
 * Program the chip's RX filtering (promiscuous / all-multi / multicast
 * hash / unicast match registers) from the net_device flags and address
 * lists.  Takes phy_lock for the duration of the register updates.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in hardware only when no vlan group is
	 * registered (software must then see the tag in the frame).
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address into one of 256 bits spread over
		 * the 8 hash registers (low CRC byte: top 3 bits pick
		 * the register, low 5 bits pick the bit).
		 */
		netdev_for_each_mc_addr(ha, dev) {
			crc = ether_crc_le(ETH_ALEN, ha->addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many secondary unicast addresses to match in hardware:
	 * fall back to promiscuous.
	 */
	if (netdev_uc_count(dev) > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into to the match filter list */
		i = 0;
		netdev_for_each_uc_addr(ha, dev) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Clear, program, then enable the sort-user register. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3612
57579f76
MC
3613static int __devinit
3614check_fw_section(const struct firmware *fw,
3615 const struct bnx2_fw_file_section *section,
3616 u32 alignment, bool non_empty)
3617{
3618 u32 offset = be32_to_cpu(section->offset);
3619 u32 len = be32_to_cpu(section->len);
3620
3621 if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3622 return -EINVAL;
3623 if ((non_empty && len == 0) || len > fw->size - offset ||
3624 len & (alignment - 1))
3625 return -EINVAL;
3626 return 0;
3627}
3628
3629static int __devinit
3630check_mips_fw_entry(const struct firmware *fw,
3631 const struct bnx2_mips_fw_file_entry *entry)
3632{
3633 if (check_fw_section(fw, &entry->text, 4, true) ||
3634 check_fw_section(fw, &entry->data, 4, false) ||
3635 check_fw_section(fw, &entry->rodata, 4, false))
3636 return -EINVAL;
3637 return 0;
3638}
3639
/* Fetch the MIPS and RV2P firmware images for this chip revision via
 * request_firmware() and sanity-check every section before use.
 * Returns 0 on success or a negative errno.  The firmware references
 * stay in bp->mips_firmware / bp->rv2p_firmware (released elsewhere,
 * including on the error paths below).
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the image set by chip generation; 5709 A0/A1 need a
	 * dedicated RV2P build.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		pr_err("Can't load firmware file \"%s\"\n", rv2p_fw_file);
		return rc;
	}
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	/* Validate every MIPS CPU entry (4-byte alignment). */
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		pr_err("Firmware file \"%s\" is invalid\n", mips_fw_file);
		return -EINVAL;
	}
	/* RV2P code sections must be non-empty and 8-byte aligned. */
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		pr_err("Firmware file \"%s\" is invalid\n", rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3691
3692static u32
3693rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3694{
3695 switch (idx) {
3696 case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3697 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3698 rv2p_code |= RV2P_BD_PAGE_SIZE;
3699 break;
3700 }
3701 return rv2p_code;
3702}
3703
/* Download one RV2P processor's firmware: stream the 8-byte instruction
 * pairs into the chip, apply any fixup words, then reset the processor
 * (it is un-stalled later).  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own address/command register pair. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* Instructions are 64 bits: write high word, low word, then the
	 * target instruction index with the write command.
	 */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Re-write the (up to 8) fixup locations with patched code. */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3763
af3ee519 3764static int
57579f76
MC
3765load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3766 const struct bnx2_mips_fw_file_entry *fw_entry)
b6016b76 3767{
57579f76
MC
3768 u32 addr, len, file_offset;
3769 __be32 *data;
b6016b76
MC
3770 u32 offset;
3771 u32 val;
3772
3773 /* Halt the CPU. */
2726d6e1 3774 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3775 val |= cpu_reg->mode_value_halt;
2726d6e1
MC
3776 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3777 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
b6016b76
MC
3778
3779 /* Load the Text area. */
57579f76
MC
3780 addr = be32_to_cpu(fw_entry->text.addr);
3781 len = be32_to_cpu(fw_entry->text.len);
3782 file_offset = be32_to_cpu(fw_entry->text.offset);
3783 data = (__be32 *)(bp->mips_firmware->data + file_offset);
ea1f8d5c 3784
57579f76
MC
3785 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3786 if (len) {
b6016b76
MC
3787 int j;
3788
57579f76
MC
3789 for (j = 0; j < (len / 4); j++, offset += 4)
3790 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3791 }
3792
57579f76
MC
3793 /* Load the Data area. */
3794 addr = be32_to_cpu(fw_entry->data.addr);
3795 len = be32_to_cpu(fw_entry->data.len);
3796 file_offset = be32_to_cpu(fw_entry->data.offset);
3797 data = (__be32 *)(bp->mips_firmware->data + file_offset);
b6016b76 3798
57579f76
MC
3799 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3800 if (len) {
b6016b76
MC
3801 int j;
3802
57579f76
MC
3803 for (j = 0; j < (len / 4); j++, offset += 4)
3804 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3805 }
3806
3807 /* Load the Read-Only area. */
57579f76
MC
3808 addr = be32_to_cpu(fw_entry->rodata.addr);
3809 len = be32_to_cpu(fw_entry->rodata.len);
3810 file_offset = be32_to_cpu(fw_entry->rodata.offset);
3811 data = (__be32 *)(bp->mips_firmware->data + file_offset);
3812
3813 offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3814 if (len) {
b6016b76
MC
3815 int j;
3816
57579f76
MC
3817 for (j = 0; j < (len / 4); j++, offset += 4)
3818 bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
b6016b76
MC
3819 }
3820
3821 /* Clear the pre-fetch instruction. */
2726d6e1 3822 bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
57579f76
MC
3823
3824 val = be32_to_cpu(fw_entry->start_addr);
3825 bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
b6016b76
MC
3826
3827 /* Start the CPU. */
2726d6e1 3828 val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
b6016b76 3829 val &= ~cpu_reg->mode_value_halt;
2726d6e1
MC
3830 bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3831 bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
af3ee519
MC
3832
3833 return 0;
b6016b76
MC
3834}
3835
/* Load firmware into all on-chip processors: both RV2P engines first,
 * then the five MIPS CPUs (RX, TX, TX patch-up, completion, command).
 * Returns 0 on success or the first load_cpu_fw() error.
 */
static int
bnx2_init_cpus(struct bnx2 *bp)
{
	const struct bnx2_mips_fw_file *mips_fw =
		(const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	const struct bnx2_rv2p_fw_file *rv2p_fw =
		(const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	int rc;

	/* Initialize the RV2P processor. */
	load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
	load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);

	/* Initialize the RX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
	if (rc)
		goto init_cpu_err;

	/* Initialize the TX Patch-up Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Completion Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
	if (rc)
		goto init_cpu_err;

	/* Initialize the Command Processor. */
	rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);

init_cpu_err:
	return rc;
}
3875
/* Transition the device between PCI power states.
 *
 * D0: clear the PM state, wait out the D3hot exit delay, and restore
 * normal EMAC/RPM operation.  D3hot: optionally arm Wake-on-LAN
 * (force 10/100 autoneg on copper, program the MAC for magic/ACPI
 * packet reception, enable EMAC+RPM), notify firmware, then write the
 * new PM state — no register access is allowed after that point.
 * Returns 0, or -EINVAL for unsupported states.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Clear the power state bits and any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Save autoneg settings; WOL link is forced to
			 * 10/100 on copper, then settings are restored.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		/* On 5706 A0/A1, only enter D3hot (state value 3) when
		 * WOL is armed; later chips always do.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
4013
/* Request the NVRAM hardware arbitration lock (arbiter slot 2) and
 * poll until granted.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_acquire_nvram_lock(struct bnx2 *bp)
{
	u32 val;
	int j;

	/* Request access to the flash interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4035
/* Release the NVRAM arbitration lock taken by
 * bnx2_acquire_nvram_lock() and poll until the grant bit clears.
 * Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_release_nvram_lock(struct bnx2 *bp)
{
	int j;
	u32 val;

	/* Relinquish nvram interface. */
	REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);

	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		val = REG_RD(bp, BNX2_NVM_SW_ARB);
		if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
			break;

		udelay(5);
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4058
4059
/* Enable writes to the flash.  For parts that need an explicit WREN
 * (BNX2_NV_WREN flag), issue the write-enable command and poll for
 * completion.  Returns 0 on success, -EBUSY on command timeout.
 */
static int
bnx2_enable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);

	if (bp->flash_info->flags & BNX2_NV_WREN) {
		int j;

		/* Clear DONE, then issue the WREN command. */
		REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
		REG_WR(bp, BNX2_NVM_COMMAND,
		       BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);

		for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
			udelay(5);

			val = REG_RD(bp, BNX2_NVM_COMMAND);
			if (val & BNX2_NVM_COMMAND_DONE)
				break;
		}

		if (j >= NVRAM_TIMEOUT_COUNT)
			return -EBUSY;
	}
	return 0;
}
4088
/* Turn off the NVRAM write-enable bit in MISC_CFG. */
static void
bnx2_disable_nvram_write(struct bnx2 *bp)
{
	u32 val;

	val = REG_RD(bp, BNX2_MISC_CFG);
	REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
}
4097
4098
4099static void
4100bnx2_enable_nvram_access(struct bnx2 *bp)
4101{
4102 u32 val;
4103
4104 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4105 /* Enable both bits, even on read. */
6aa20a22 4106 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4107 val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4108}
4109
4110static void
4111bnx2_disable_nvram_access(struct bnx2 *bp)
4112{
4113 u32 val;
4114
4115 val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4116 /* Disable both bits, even after read. */
6aa20a22 4117 REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
b6016b76
MC
4118 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4119 BNX2_NVM_ACCESS_ENABLE_WR_EN));
4120}
4121
/* Erase the flash page containing @offset.  Buffered flash parts need
 * no erase and return immediately.  Returns 0 on success/no-op, -EBUSY
 * on command timeout.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4161
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, byte-for-byte).  @cmd_flags carries FIRST/LAST framing
 * bits from the caller.  Returns 0 on success, -EBUSY on timeout.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4205
4206
/* Write one 32-bit word (@val, 4 raw bytes in big-endian on-flash
 * order) to NVRAM at @offset.
 *
 * @cmd_flags carries BNX2_NVM_COMMAND_FIRST/LAST framing bits for
 * multi-word transactions.  BNX2_NV_TRANSLATE parts get the linear
 * offset converted to page/byte addressing first (not needed on 5709).
 *
 * Returns 0 on success or -EBUSY on completion timeout.  Caller must
 * hold the NVRAM lock with access and write both enabled.
 */
static int
bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
{
	u32 cmd;
	__be32 val32;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			  bp->flash_info->page_bits) +
			 (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	memcpy(&val32, val, 4);

	/* Write the data. */
	REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));

	/* Address of the NVRAM to write to. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue the write command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		udelay(5);

		if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
			break;
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4250
/* Probe the attached flash/EEPROM part and record it in bp->flash_info,
 * then determine the usable flash size (bp->flash_size).
 *
 * 5709 chips always use the fixed flash_5709 descriptor.  Older chips
 * are matched against flash_table[] using the strap bits in NVM_CFG1;
 * bit 30 set means the interface was already reconfigured, so the
 * backup-strap mask is compared against each entry's config1 instead
 * of its strapping value.  When a match requires reconfiguration, the
 * NVM_CFG1..3/WRITE1 registers are programmed under the NVRAM lock.
 *
 * Returns 0 on success, -ENODEV for an unrecognized part, or the error
 * from bnx2_acquire_nvram_lock().
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* NOTE(review): bit 23 appears to select the backup strap
		 * field — matches the reconfigured-path mask; confirm against
		 * the NVM_CFG1 register definition. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	if (j == entry_count) {
		/* No table entry matched the strap bits. */
		bp->flash_info = NULL;
		pr_alert("Unknown flash/EEPROM type\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the size advertised in shared firmware config; fall back
	 * to the flash descriptor's total size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4333
4334static int
4335bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4336 int buf_size)
4337{
4338 int rc = 0;
4339 u32 cmd_flags, offset32, len32, extra;
4340
4341 if (buf_size == 0)
4342 return 0;
4343
4344 /* Request access to the flash interface. */
4345 if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4346 return rc;
4347
4348 /* Enable access to flash interface */
4349 bnx2_enable_nvram_access(bp);
4350
4351 len32 = buf_size;
4352 offset32 = offset;
4353 extra = 0;
4354
4355 cmd_flags = 0;
4356
4357 if (offset32 & 3) {
4358 u8 buf[4];
4359 u32 pre_len;
4360
4361 offset32 &= ~3;
4362 pre_len = 4 - (offset & 3);
4363
4364 if (pre_len >= len32) {
4365 pre_len = len32;
4366 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4367 BNX2_NVM_COMMAND_LAST;
4368 }
4369 else {
4370 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4371 }
4372
4373 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4374
4375 if (rc)
4376 return rc;
4377
4378 memcpy(ret_buf, buf + (offset & 3), pre_len);
4379
4380 offset32 += 4;
4381 ret_buf += pre_len;
4382 len32 -= pre_len;
4383 }
4384 if (len32 & 3) {
4385 extra = 4 - (len32 & 3);
4386 len32 = (len32 + 4) & ~3;
4387 }
4388
4389 if (len32 == 4) {
4390 u8 buf[4];
4391
4392 if (cmd_flags)
4393 cmd_flags = BNX2_NVM_COMMAND_LAST;
4394 else
4395 cmd_flags = BNX2_NVM_COMMAND_FIRST |
4396 BNX2_NVM_COMMAND_LAST;
4397
4398 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4399
4400 memcpy(ret_buf, buf, 4 - extra);
4401 }
4402 else if (len32 > 0) {
4403 u8 buf[4];
4404
4405 /* Read the first word. */
4406 if (cmd_flags)
4407 cmd_flags = 0;
4408 else
4409 cmd_flags = BNX2_NVM_COMMAND_FIRST;
4410
4411 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4412
4413 /* Advance to the next dword. */
4414 offset32 += 4;
4415 ret_buf += 4;
4416 len32 -= 4;
4417
4418 while (len32 > 4 && rc == 0) {
4419 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4420
4421 /* Advance to the next dword. */
4422 offset32 += 4;
4423 ret_buf += 4;
4424 len32 -= 4;
4425 }
4426
4427 if (rc)
4428 return rc;
4429
4430 cmd_flags = BNX2_NVM_COMMAND_LAST;
4431 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4432
4433 memcpy(ret_buf, buf, 4 - extra);
4434 }
4435
4436 /* Disable access to flash interface */
4437 bnx2_disable_nvram_access(bp);
4438
4439 bnx2_release_nvram_lock(bp);
4440
4441 return rc;
4442}
4443
/* Write @buf_size bytes from @data_buf to NVRAM at @offset.
 *
 * Unaligned starts/ends are handled by reading the bordering dwords
 * first and merging them into a kmalloc'ed aligned copy (align_buf).
 * The main loop then processes one flash page per iteration:
 *   - unbuffered parts: read the whole page into flash_buffer, erase
 *     it, and rewrite page_start..data_start, the new data, then
 *     data_end..page_end;
 *   - buffered parts: write only the new data (no erase needed).
 * The NVRAM lock and access/write enables are acquired and released
 * around each page so other lock users are not starved.
 *
 * Returns 0 on success or a negative errno (lock, read, erase, write
 * or -ENOMEM from the allocations).
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	if ((align_start = (offset32 & 3))) {
		/* Unaligned start: fetch the dword containing offset so the
		 * leading bytes can be preserved. */
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	if (len32 & 3) {
		/* Unaligned end: fetch the final dword to preserve the
		 * trailing bytes. */
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	if (align_start || align_end) {
		/* Build a dword-aligned copy: saved edges + caller data. */
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		/* Scratch buffer for one full page during read-modify-write.
		 * NOTE(review): 264 presumably covers the largest supported
		 * page size — confirm against flash_table entries. */
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so unconditional frees are safe here. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4623
0d8a6571 4624static void
7c62e83b 4625bnx2_init_fw_cap(struct bnx2 *bp)
0d8a6571 4626{
7c62e83b 4627 u32 val, sig = 0;
0d8a6571 4628
583c28e5 4629 bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
7c62e83b
MC
4630 bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4631
4632 if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4633 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
0d8a6571 4634
2726d6e1 4635 val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
0d8a6571
MC
4636 if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4637 return;
4638
7c62e83b
MC
4639 if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4640 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4641 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4642 }
4643
4644 if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4645 (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4646 u32 link;
4647
583c28e5 4648 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
0d8a6571 4649
7c62e83b
MC
4650 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4651 if (link & BNX2_LINK_STATUS_SERDES_LINK)
0d8a6571
MC
4652 bp->phy_port = PORT_FIBRE;
4653 else
4654 bp->phy_port = PORT_TP;
489310a4 4655
7c62e83b
MC
4656 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4657 BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
0d8a6571 4658 }
7c62e83b
MC
4659
4660 if (netif_running(bp->dev) && sig)
4661 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
0d8a6571
MC
4662}
4663
/* Map the MSI-X table and PBA through GRC windows 2 and 3.
 * The window-address register is switched to separate-window mode
 * first; ordering of these three writes follows the hardware's
 * window-programming requirement (presumably — confirm against the
 * NetXtreme II register reference).
 */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4672
/* Soft-reset the chip and re-synchronize with the bootcode firmware.
 *
 * Sequence: quiesce DMA/coalescing, get firmware permission
 * (DRV_MSG_DATA_WAIT0 | @reset_code), deposit the driver-reset
 * signature, issue the reset (MISC_COMMAND on 5709, PCICFG_MISC_CONFIG
 * on older chips), wait for completion, verify endian configuration,
 * wait for firmware init (WAIT1), then refresh firmware capabilities
 * and re-apply chip-specific workarounds (5706 A0 voltage/rbuf,
 * MSI-X table + GRC timeout when MSI-X is in use).
 *
 * Returns 0 on success, -EBUSY if the reset never completes, -ENODEV
 * for a byte-swap misconfiguration, or a firmware-sync error.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709 uses a dedicated software-reset command register. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			pr_err("Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		pr_err("Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	/* Re-apply remote-link defaults if the capability refresh moved
	 * the port type (fibre <-> TP). */
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		bnx2_setup_msix_tbl(bp);
		/* Prevent MSIX table reads and write from timing out */
		REG_WR(bp, BNX2_MISC_ECO_HW_CTL,
		       BNX2_MISC_ECO_HW_CTL_LARGE_GRC_TMOUT_EN);
	}

	return rc;
}
4782
/* Bring the chip from post-reset state to operational: program DMA and
 * byte-swap configuration, contexts, firmware CPUs, MAC address, MTU,
 * status/statistics block DMA addresses, host-coalescing parameters
 * (per-vector when MSI-X is active), and finally tell the firmware
 * initialization is complete before enabling the remaining engines.
 *
 * Returns 0 on success or a negative errno from context setup, CPU
 * firmware load, or the final firmware sync.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented magic bits in DMA_CONFIG — kept as-is;
	 * meaning would need the hardware reference to confirm. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	if (bp->flags & BNX2_FLAG_PCIX) {
		/* Disable PCI-X relaxed ordering. */
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Load firmware into the on-chip processors. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are sized for at least a standard 1500-byte MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear all status blocks and per-vector bookkeeping. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host coalescing: each register packs the interrupt-mode value in
	 * the high 16 bits and the polling-mode value in the low 16 bits. */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Per-vector coalescing config for the additional MSI-X vectors
	 * (vector 0 uses the global registers programmed above). */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
		       (bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell firmware initialization is done; then enable the rest of
	 * the blocks. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
5007
c76c0475
MC
5008static void
5009bnx2_clear_ring_states(struct bnx2 *bp)
5010{
5011 struct bnx2_napi *bnapi;
35e9010b 5012 struct bnx2_tx_ring_info *txr;
bb4f98ab 5013 struct bnx2_rx_ring_info *rxr;
c76c0475
MC
5014 int i;
5015
5016 for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
5017 bnapi = &bp->bnx2_napi[i];
35e9010b 5018 txr = &bnapi->tx_ring;
bb4f98ab 5019 rxr = &bnapi->rx_ring;
c76c0475 5020
35e9010b
MC
5021 txr->tx_cons = 0;
5022 txr->hw_tx_cons = 0;
bb4f98ab
MC
5023 rxr->rx_prod_bseq = 0;
5024 rxr->rx_prod = 0;
5025 rxr->rx_cons = 0;
5026 rxr->rx_pg_prod = 0;
5027 rxr->rx_pg_cons = 0;
c76c0475
MC
5028 }
5029}
5030
59b47d8a 5031static void
35e9010b 5032bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
59b47d8a
MC
5033{
5034 u32 val, offset0, offset1, offset2, offset3;
62a8313c 5035 u32 cid_addr = GET_CID_ADDR(cid);
59b47d8a
MC
5036
5037 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5038 offset0 = BNX2_L2CTX_TYPE_XI;
5039 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5040 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5041 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5042 } else {
5043 offset0 = BNX2_L2CTX_TYPE;
5044 offset1 = BNX2_L2CTX_CMD_TYPE;
5045 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5046 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5047 }
5048 val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
62a8313c 5049 bnx2_ctx_wr(bp, cid_addr, offset0, val);
59b47d8a
MC
5050
5051 val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
62a8313c 5052 bnx2_ctx_wr(bp, cid_addr, offset1, val);
59b47d8a 5053
35e9010b 5054 val = (u64) txr->tx_desc_mapping >> 32;
62a8313c 5055 bnx2_ctx_wr(bp, cid_addr, offset2, val);
59b47d8a 5056
35e9010b 5057 val = (u64) txr->tx_desc_mapping & 0xffffffff;
62a8313c 5058 bnx2_ctx_wr(bp, cid_addr, offset3, val);
59b47d8a 5059}
b6016b76
MC
5060
5061static void
35e9010b 5062bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
b6016b76
MC
5063{
5064 struct tx_bd *txbd;
c76c0475
MC
5065 u32 cid = TX_CID;
5066 struct bnx2_napi *bnapi;
35e9010b 5067 struct bnx2_tx_ring_info *txr;
c76c0475 5068
35e9010b
MC
5069 bnapi = &bp->bnx2_napi[ring_num];
5070 txr = &bnapi->tx_ring;
5071
5072 if (ring_num == 0)
5073 cid = TX_CID;
5074 else
5075 cid = TX_TSS_CID + ring_num - 1;
b6016b76 5076
2f8af120
MC
5077 bp->tx_wake_thresh = bp->tx_ring_size / 2;
5078
35e9010b 5079 txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
6aa20a22 5080
35e9010b
MC
5081 txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5082 txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
b6016b76 5083
35e9010b
MC
5084 txr->tx_prod = 0;
5085 txr->tx_prod_bseq = 0;
6aa20a22 5086
35e9010b
MC
5087 txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5088 txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
b6016b76 5089
35e9010b 5090 bnx2_init_tx_context(bp, cid, txr);
b6016b76
MC
5091}
5092
5093static void
5d5d0015
MC
5094bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5095 int num_rings)
b6016b76 5096{
b6016b76 5097 int i;
5d5d0015 5098 struct rx_bd *rxbd;
6aa20a22 5099
5d5d0015 5100 for (i = 0; i < num_rings; i++) {
13daffa2 5101 int j;
b6016b76 5102
5d5d0015 5103 rxbd = &rx_ring[i][0];
13daffa2 5104 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5d5d0015 5105 rxbd->rx_bd_len = buf_size;
13daffa2
MC
5106 rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5107 }
5d5d0015 5108 if (i == (num_rings - 1))
13daffa2
MC
5109 j = 0;
5110 else
5111 j = i + 1;
5d5d0015
MC
5112 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5113 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
13daffa2 5114 }
5d5d0015
MC
5115}
5116
/* Initialize RX ring @ring_num: build the BD chain(s), program the RX
 * context (including the optional jumbo page ring), pre-fill the rings
 * with pages/skbs, and write the initial producer indices to the
 * hardware mailboxes.
 */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the base RX CID; additional RSS rings follow it. */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Jumbo support: a second chain of page-sized buffers. */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
			    BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Base address of the normal RX BD chain. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Pre-fill the page ring; a partial fill is tolerated with a
	 * warning (allocation failure under memory pressure). */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx page ring %d with %d/%d pages only\n",
				    ring_num, i, bp->rx_pg_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Pre-fill the skb ring the same way. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0) {
			netdev_warn(bp->dev, "init'ed rx ring %d with %d/%d skbs only\n",
				    ring_num, i, bp->rx_ring_size);
			break;
		}
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	/* Publish the initial producer indices to the hardware. */
	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5202
35e9010b
MC
/* Initialize all TX and RX rings and, when more than one RX ring is in
 * use, program the RSS indirection table and enable RSS hashing.
 * Called with the device quiesced (from bnx2_reset_nic path).
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	/* Disable TSS while the TX rings are (re)built. */
	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	/* Disable RSS and zero the table size while RX rings are built. */
	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte at a time and flush
		 * each completed 32-bit word to the RXP scratch area.
		 * Entries spread flows over rings 0..num_rx_rings-2; ring
		 * indices here are relative (ring 0 is the default ring).
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5247
/* Return the number of ring pages needed to hold @ring_size descriptors,
 * rounded up to the next power of 2 (the hardware requires a power-of-2
 * page count).  @max_size is the upper bound and is assumed to be a
 * power of 2 (MAX_RX_RINGS / MAX_RX_PG_RINGS).
 */
static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
{
	u32 max, num_rings = 1;

	/* num_rings = ceil(ring_size / MAX_RX_DESC_CNT), minimum 1. */
	while (ring_size > MAX_RX_DESC_CNT) {
		ring_size -= MAX_RX_DESC_CNT;
		num_rings++;
	}
	/* round to next power of 2 */
	max = max_size;
	/* Shift the single set bit of max down until it lines up with the
	 * highest set bit of num_rings that is <= max_size. */
	while ((max & num_rings) == 0)
		max >>= 1;

	/* If num_rings was not already that exact power of 2, round up. */
	if (num_rings != max)
		max <<= 1;

	return max;
}
5266
/* Compute all RX buffer and ring sizing parameters for the current MTU
 * and requested ring size @size.  When the required buffer would not fit
 * in one page (jumbo MTU) and the chip supports it, the payload is split
 * across the RX page ring and only the header portion is kept in the
 * normal RX buffers.
 */
static void
bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
{
	u32 rx_size, rx_space, jumbo_size;

	/* 8 for CRC and VLAN */
	rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;

	/* Total skb footprint including alignment padding and shared info. */
	rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
		sizeof(struct skb_shared_info);

	bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
	bp->rx_pg_ring_size = 0;
	bp->rx_max_pg_ring = 0;
	bp->rx_max_pg_ring_idx = 0;
	if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
		/* Pages per packet; the "- 40" appears to discount the
		 * header bytes kept in the regular buffer — inherited
		 * heuristic, confirm against Broadcom docs if changed. */
		int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;

		jumbo_size = size * pages;
		if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
			jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;

		bp->rx_pg_ring_size = jumbo_size;
		bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
							MAX_RX_PG_RINGS);
		bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
		/* In page-ring mode the normal buffer only holds headers. */
		rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
		bp->rx_copy_thresh = 0;
	}

	bp->rx_buf_use_size = rx_size;
	/* hw alignment */
	bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
	bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
	bp->rx_ring_size = size;
	bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
	bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
}
5305
b6016b76
MC
/* Unmap and free every skb still owned by the TX rings.  Used on reset
 * and shutdown paths; assumes the hardware is no longer DMA-ing.
 */
static void
bnx2_free_tx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_tx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
		int j;

		if (txr->tx_buf_ring == NULL)
			continue;

		/* j advances inside the body: one slot for the header BD
		 * plus one per fragment, so no increment in the for. */
		for (j = 0; j < TX_DESC_CNT; ) {
			struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
			struct sk_buff *skb = tx_buf->skb;
			int k, last;

			if (skb == NULL) {
				j++;
				continue;
			}

			/* First BD maps the linear (head) part of the skb. */
			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(tx_buf, mapping),
					 skb_headlen(skb),
					 PCI_DMA_TODEVICE);

			tx_buf->skb = NULL;

			/* Following BDs map the paged fragments. */
			last = tx_buf->nr_frags;
			j++;
			for (k = 0; k < last; k++, j++) {
				tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
				pci_unmap_page(bp->pdev,
					dma_unmap_addr(tx_buf, mapping),
					skb_shinfo(skb)->frags[k].size,
					PCI_DMA_TODEVICE);
			}
			dev_kfree_skb(skb);
		}
	}
}
5349
/* Unmap and free every skb and page still owned by the RX rings.
 * Called on reset/shutdown paths with DMA quiesced.
 */
static void
bnx2_free_rx_skbs(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		/* NOTE(review): bails out of the whole function, unlike the
		 * `continue` in bnx2_free_tx_skbs.  Presumably safe because
		 * rings are allocated together (a NULL ring implies all
		 * later rings are NULL too) — confirm before changing. */
		if (rxr->rx_buf_ring == NULL)
			return;

		for (j = 0; j < bp->rx_max_ring_idx; j++) {
			struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 dma_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_use_size,
					 PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;

			dev_kfree_skb(skb);
		}
		/* Release the jumbo page-ring pages as well. */
		for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
			bnx2_free_rx_page(bp, rxr, j);
	}
}
5383
/* Free all driver-owned skbs on both the TX and RX sides. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5390
5391static int
5392bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5393{
5394 int rc;
5395
5396 rc = bnx2_reset_chip(bp, reset_code);
5397 bnx2_free_skbs(bp);
5398 if (rc)
5399 return rc;
5400
fba9fe91
MC
5401 if ((rc = bnx2_init_chip(bp)) != 0)
5402 return rc;
5403
35e9010b 5404 bnx2_init_all_rings(bp);
b6016b76
MC
5405 return 0;
5406}
5407
/* Full NIC (re)initialization: reset + reinit the chip and rings, then
 * bring up the PHY and link state under phy_lock.  @reset_phy selects
 * whether the PHY itself is reset as part of init.
 */
static int
bnx2_init_nic(struct bnx2 *bp, int reset_phy)
{
	int rc;

	if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
		return rc;

	/* PHY access and link state changes are serialized by phy_lock. */
	spin_lock_bh(&bp->phy_lock);
	bnx2_init_phy(bp, reset_phy);
	bnx2_set_link(bp);
	/* With a remote (firmware-managed) PHY, pick up any pending event. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		bnx2_remote_phy_event(bp);
	spin_unlock_bh(&bp->phy_lock);
	return 0;
}
5424
74bf4ba3
MC
5425static int
5426bnx2_shutdown_chip(struct bnx2 *bp)
5427{
5428 u32 reset_code;
5429
5430 if (bp->flags & BNX2_FLAG_NO_WOL)
5431 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5432 else if (bp->wol)
5433 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5434 else
5435 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5436
5437 return bnx2_reset_chip(bp, reset_code);
5438}
5439
b6016b76
MC
/* Ethtool self-test: walk a table of registers and verify that the
 * read/write bits (rw_mask) toggle and the read-only bits (ro_mask) hold
 * their value.  Each register is restored to its saved value afterwards,
 * including on the failure path.  Returns 0 or -ENODEV.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;
#define BNX2_FL_NOT_5709	1	/* entry invalid on 5709 chips */
		u32   rw_mask;
		u32   ro_mask;
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709, 0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		/* 0xffff terminates the table. */
		{ 0xffff, 0, 0x00000000, 0x00000000 },
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write all-zeros: rw bits must read back 0, ro bits must
		 * keep their original value. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back 1, ro bits must
		 * still keep their original value. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the register even when the test fails. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5610
5611static int
5612bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5613{
f71e1309 5614 static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
b6016b76
MC
5615 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5616 int i;
5617
5618 for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5619 u32 offset;
5620
5621 for (offset = 0; offset < size; offset += 4) {
5622
2726d6e1 5623 bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
b6016b76 5624
2726d6e1 5625 if (bnx2_reg_rd_ind(bp, start + offset) !=
b6016b76
MC
5626 test_pattern[i]) {
5627 return -ENODEV;
5628 }
5629 }
5630 }
5631 return 0;
5632}
5633
5634static int
5635bnx2_test_memory(struct bnx2 *bp)
5636{
5637 int ret = 0;
5638 int i;
5bae30c9 5639 static struct mem_entry {
b6016b76
MC
5640 u32 offset;
5641 u32 len;
5bae30c9 5642 } mem_tbl_5706[] = {
b6016b76 5643 { 0x60000, 0x4000 },
5b0c76ad 5644 { 0xa0000, 0x3000 },
b6016b76
MC
5645 { 0xe0000, 0x4000 },
5646 { 0x120000, 0x4000 },
5647 { 0x1a0000, 0x4000 },
5648 { 0x160000, 0x4000 },
5649 { 0xffffffff, 0 },
5bae30c9
MC
5650 },
5651 mem_tbl_5709[] = {
5652 { 0x60000, 0x4000 },
5653 { 0xa0000, 0x3000 },
5654 { 0xe0000, 0x4000 },
5655 { 0x120000, 0x4000 },
5656 { 0x1a0000, 0x4000 },
5657 { 0xffffffff, 0 },
b6016b76 5658 };
5bae30c9
MC
5659 struct mem_entry *mem_tbl;
5660
5661 if (CHIP_NUM(bp) == CHIP_NUM_5709)
5662 mem_tbl = mem_tbl_5709;
5663 else
5664 mem_tbl = mem_tbl_5706;
b6016b76
MC
5665
5666 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5667 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5668 mem_tbl[i].len)) != 0) {
5669 return ret;
5670 }
5671 }
6aa20a22 5672
b6016b76
MC
5673 return ret;
5674}
5675
bc5a0690
MC
5676#define BNX2_MAC_LOOPBACK 0
5677#define BNX2_PHY_LOOPBACK 1
5678
/* Run one loopback self-test packet through the chip in either MAC or
 * PHY loopback mode.  Builds a test frame, queues it on TX ring 0,
 * forces coalescing so the completion is posted without an interrupt,
 * then verifies the frame came back intact on RX ring 0.
 * Returns 0 on success, -ENODEV on any mismatch, -EINVAL for an unknown
 * mode, -ENOMEM/-EIO on allocation/mapping failure.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Both TX and RX of the test packet use vector 0's rings. */
	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback cannot be done with a remote PHY; report
		 * success so the self-test does not flag a failure. */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Keep the frame below the jumbo split threshold (minus CRC). */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	/* DA = our own MAC, SA + type zeroed, payload = index pattern. */
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	map = pci_map_single(bp->pdev, skb->data, pkt_size,
			     PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, map)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	/* Force a coalesce (without raising an interrupt) so the RX
	 * consumer index below is current. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Build a single start+end TX BD for the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Coalesce again so the TX/RX completions are visible. */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	pci_unmap_single(bp->pdev, map, pkt_size, PCI_DMA_TODEVICE);
	dev_kfree_skb(skb);

	/* TX must have completed exactly our one descriptor. */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* Exactly num_pkts frames must have arrived. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = rx_buf->desc;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		dma_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Any receive error in the L2 frame header fails the test. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length must match (minus 4-byte CRC appended by the MAC). */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	/* Verify the payload pattern byte-for-byte. */
	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5810
bc5a0690
MC
5811#define BNX2_MAC_LOOPBACK_FAILED 1
5812#define BNX2_PHY_LOOPBACK_FAILED 2
5813#define BNX2_LOOPBACK_FAILED (BNX2_MAC_LOOPBACK_FAILED | \
5814 BNX2_PHY_LOOPBACK_FAILED)
5815
5816static int
5817bnx2_test_loopback(struct bnx2 *bp)
5818{
5819 int rc = 0;
5820
5821 if (!netif_running(bp->dev))
5822 return BNX2_LOOPBACK_FAILED;
5823
5824 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5825 spin_lock_bh(&bp->phy_lock);
9a120bc5 5826 bnx2_init_phy(bp, 1);
bc5a0690
MC
5827 spin_unlock_bh(&bp->phy_lock);
5828 if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5829 rc |= BNX2_MAC_LOOPBACK_FAILED;
5830 if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5831 rc |= BNX2_PHY_LOOPBACK_FAILED;
5832 return rc;
5833}
5834
b6016b76
MC
5835#define NVRAM_SIZE 0x200
5836#define CRC32_RESIDUAL 0xdebb20e3
5837
5838static int
5839bnx2_test_nvram(struct bnx2 *bp)
5840{
b491edd5 5841 __be32 buf[NVRAM_SIZE / 4];
b6016b76
MC
5842 u8 *data = (u8 *) buf;
5843 int rc = 0;
5844 u32 magic, csum;
5845
5846 if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5847 goto test_nvram_done;
5848
5849 magic = be32_to_cpu(buf[0]);
5850 if (magic != 0x669955aa) {
5851 rc = -ENODEV;
5852 goto test_nvram_done;
5853 }
5854
5855 if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5856 goto test_nvram_done;
5857
5858 csum = ether_crc_le(0x100, data);
5859 if (csum != CRC32_RESIDUAL) {
5860 rc = -ENODEV;
5861 goto test_nvram_done;
5862 }
5863
5864 csum = ether_crc_le(0x100, data + 0x100);
5865 if (csum != CRC32_RESIDUAL) {
5866 rc = -ENODEV;
5867 }
5868
5869test_nvram_done:
5870 return rc;
5871}
5872
/* Ethtool link self-test: report 0 when link is up, -ENODEV otherwise.
 * With a remote PHY the cached link state is used; with a local PHY the
 * BMSR is read twice because link status is latched-low in the first
 * read after a link bounce.
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
	u32 bmsr;

	if (!netif_running(bp->dev))
		return -ENODEV;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		if (bp->link_up)
			return 0;
		return -ENODEV;
	}
	spin_lock_bh(&bp->phy_lock);
	bnx2_enable_bmsr1(bp);
	/* Double read: first read clears the latched link-down bit. */
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (bmsr & BMSR_LSTATUS) {
		return 0;
	}
	return -ENODEV;
}
5898
5899static int
5900bnx2_test_intr(struct bnx2 *bp)
5901{
5902 int i;
b6016b76
MC
5903 u16 status_idx;
5904
5905 if (!netif_running(bp->dev))
5906 return -ENODEV;
5907
5908 status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;
5909
5910 /* This register is not touched during run-time. */
bf5295bb 5911 REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
b6016b76
MC
5912 REG_RD(bp, BNX2_HC_COMMAND);
5913
5914 for (i = 0; i < 10; i++) {
5915 if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
5916 status_idx) {
5917
5918 break;
5919 }
5920
5921 msleep_interruptible(10);
5922 }
5923 if (i < 10)
5924 return 0;
5925
5926 return -ENODEV;
5927}
5928
38ea3686 5929/* Determining link for parallel detection. */
b2fadeae
MC
/* Determining link for parallel detection (5706 SerDes).  Returns 1 when
 * the SerDes shows a usable link partner without autoneg completing:
 * signal detected, receiver in sync, and no autoneg CONFIG codewords
 * being received.  Returns 0 otherwise or when parallel detection is
 * disabled.  Caller holds phy_lock.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
	u32 mode_ctl, an_dbg, exp;

	if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
		return 0;

	/* Shadow-register access: write the selector, then read back. */
	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

	if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
	/* Double read: presumably clears latched status — same pattern is
	 * used elsewhere in this driver for latched PHY bits. */
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
	bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

	if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
		return 0;

	bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
	bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

	if (exp & MII_EXPAND_REG1_RUDI_C)	/* receiving CONFIG */
		return 0;

	return 1;
}
5960
/* Periodic 5706 SerDes state machine, run from bnx2_timer().  Handles
 * parallel detection: when autoneg fails but the partner has link, force
 * 1G full duplex; when a forced link later sees autoneg activity, go
 * back to autoneg.  Also watches for loss of sync to force link down.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
	int check_link = 1;

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending) {
		/* Autoneg recently (re)started; give it time to finish. */
		bp->serdes_an_pending--;
		check_link = 0;
	} else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bp->current_interval = BNX2_TIMER_INTERVAL;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg enabled but no link: if the partner has
			 * link anyway, force 1000/full (parallel detect). */
			if (bnx2_5706_serdes_has_link(bp)) {
				bmcr &= ~BMCR_ANENABLE;
				bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
				bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
			}
		}
	}
	else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
		 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
		u32 phy2;

		/* Link was forced via parallel detect; if the partner now
		 * sends autoneg pages (bit 0x20 of expansion reg 0x0f01),
		 * re-enable autoneg. */
		bnx2_write_phy(bp, 0x17, 0x0f01);
		bnx2_read_phy(bp, 0x15, &phy2);
		if (phy2 & 0x20) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	if (check_link) {
		u32 val;

		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		/* Double read of the latched AN debug status. */
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

		if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
			/* Link reported up but receiver lost sync: force
			 * link down once, then let set_link re-evaluate. */
			if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
				bnx2_5706s_force_link_dn(bp, 1);
				bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
			} else
				bnx2_set_link(bp);
		} else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
			bnx2_set_link(bp);
	}
	spin_unlock(&bp->phy_lock);
}
b6016b76 6022
f8dd064e
MC
/* Periodic 5708 SerDes state machine, run from bnx2_timer().  On
 * 2.5G-capable parts with autoneg enabled and no link, alternate between
 * forced 2.5G and autoneg until link comes up.  No-op with a remote PHY.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
		bp->serdes_an_pending = 0;
		return;
	}

	spin_lock(&bp->phy_lock);
	if (bp->serdes_an_pending)
		/* Recently toggled modes; wait before switching again. */
		bp->serdes_an_pending--;
	else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
		u32 bmcr;

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		if (bmcr & BMCR_ANENABLE) {
			/* Autoneg failed so far: try forced 2.5G briefly. */
			bnx2_enable_forced_2g5(bp);
			bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
		} else {
			/* Forced 2.5G failed: back to autoneg and hold
			 * off mode switching for 2 timer ticks. */
			bnx2_disable_forced_2g5(bp);
			bp->serdes_an_pending = 2;
			bp->current_interval = BNX2_TIMER_INTERVAL;
		}

	} else
		bp->current_interval = BNX2_TIMER_INTERVAL;

	spin_unlock(&bp->phy_lock);
}
6055
48b01e2d
MC
/* Main periodic driver timer: firmware heartbeat, statistics refresh
 * workarounds, missed-MSI check, and the per-chip SerDes state machines.
 * Re-arms itself with the (possibly updated) current_interval.
 */
static void
bnx2_timer(unsigned long data)
{
	struct bnx2 *bp = (struct bnx2 *) data;

	if (!netif_running(bp->dev))
		return;

	/* Interrupts are disabled (e.g. reset in progress): skip the
	 * work but keep the timer running. */
	if (atomic_read(&bp->intr_sem) != 0)
		goto bnx2_restart_timer;

	/* Only plain (non one-shot) MSI needs the missed-MSI check. */
	if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
	     BNX2_FLAG_USING_MSI)
		bnx2_chk_missed_msi(bp);

	bnx2_send_heart_beat(bp);

	bp->stats_blk->stat_FwRxDrop =
		bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

	/* workaround occasional corrupted counters */
	if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
		REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
					    BNX2_HC_COMMAND_STATS_NOW);

	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		if (CHIP_NUM(bp) == CHIP_NUM_5706)
			bnx2_5706_serdes_timer(bp);
		else
			bnx2_5708_serdes_timer(bp);
	}

bnx2_restart_timer:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6091
8e6a72c4
MC
6092static int
6093bnx2_request_irq(struct bnx2 *bp)
6094{
6d866ffc 6095 unsigned long flags;
b4b36042
MC
6096 struct bnx2_irq *irq;
6097 int rc = 0, i;
8e6a72c4 6098
f86e82fb 6099 if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6d866ffc
MC
6100 flags = 0;
6101 else
6102 flags = IRQF_SHARED;
b4b36042
MC
6103
6104 for (i = 0; i < bp->irq_nvecs; i++) {
6105 irq = &bp->irq_tbl[i];
c76c0475 6106 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
f0ea2e63 6107 &bp->bnx2_napi[i]);
b4b36042
MC
6108 if (rc)
6109 break;
6110 irq->requested = 1;
6111 }
8e6a72c4
MC
6112 return rc;
6113}
6114
6115static void
6116bnx2_free_irq(struct bnx2 *bp)
6117{
b4b36042
MC
6118 struct bnx2_irq *irq;
6119 int i;
8e6a72c4 6120
b4b36042
MC
6121 for (i = 0; i < bp->irq_nvecs; i++) {
6122 irq = &bp->irq_tbl[i];
6123 if (irq->requested)
f0ea2e63 6124 free_irq(irq->vector, &bp->bnx2_napi[i]);
b4b36042 6125 irq->requested = 0;
6d866ffc 6126 }
f86e82fb 6127 if (bp->flags & BNX2_FLAG_USING_MSI)
b4b36042 6128 pci_disable_msi(bp->pdev);
f86e82fb 6129 else if (bp->flags & BNX2_FLAG_USING_MSIX)
b4b36042
MC
6130 pci_disable_msix(bp->pdev);
6131
f86e82fb 6132 bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
b4b36042
MC
6133}
6134
/* Try to switch the device to MSI-X with @msix_vecs vectors.  Programs
 * the MSI-X table/PBA windows, enables MSI-X on the PCI function, and on
 * success fills bp->irq_tbl with per-vector handlers and names.  Silently
 * leaves the driver in its previous interrupt mode on failure.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
	int i, rc;
	struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
	struct net_device *dev = bp->dev;
	const int len = sizeof(bp->irq_tbl[0].name);

	bnx2_setup_msix_tbl(bp);
	REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
	REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
	REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

	/* Need to flush the previous three writes to ensure MSI-X
	 * is setup properly */
	REG_RD(bp, BNX2_PCI_MSIX_CONTROL);

	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		msix_ent[i].entry = i;
		msix_ent[i].vector = 0;
	}

	/* All-or-nothing: pci_enable_msix() returns non-zero when the
	 * full vector count is unavailable. */
	rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
	if (rc != 0)
		return;

	bp->irq_nvecs = msix_vecs;
	bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
		bp->irq_tbl[i].vector = msix_ent[i].vector;
		snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
		bp->irq_tbl[i].handler = bnx2_msi_1shot;
	}
}
6169
/* Choose the interrupt mode (MSI-X, MSI, or legacy INTx) and size the
 * TX/RX ring counts accordingly.  @dis_msi forces legacy INTx (used
 * after a failed MSI self-test).  Defaults are set up for INTx first so
 * every fallback path is consistent.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
	int cpus = num_online_cpus();
	/* One vector per CPU plus one, capped by the RX ring limit. */
	int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

	/* INTx defaults; overwritten below when MSI/MSI-X succeeds. */
	bp->irq_tbl[0].handler = bnx2_interrupt;
	strcpy(bp->irq_tbl[0].name, bp->dev->name);
	bp->irq_nvecs = 1;
	bp->irq_tbl[0].vector = bp->pdev->irq;

	/* MSI-X only pays off with more than one CPU. */
	if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
		bnx2_enable_msix(bp, msix_vecs);

	/* Fall back to single-vector MSI when MSI-X was not enabled. */
	if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
	    !(bp->flags & BNX2_FLAG_USING_MSIX)) {
		if (pci_enable_msi(bp->pdev) == 0) {
			bp->flags |= BNX2_FLAG_USING_MSI;
			if (CHIP_NUM(bp) == CHIP_NUM_5709) {
				/* 5709 supports one-shot MSI. */
				bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
				bp->irq_tbl[0].handler = bnx2_msi_1shot;
			} else
				bp->irq_tbl[0].handler = bnx2_msi;

			bp->irq_tbl[0].vector = bp->pdev->irq;
		}
	}

	/* TX ring count must be a power of 2 for the hardware hashing. */
	bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
	bp->dev->real_num_tx_queues = bp->num_tx_rings;

	bp->num_rx_rings = bp->irq_nvecs;
}
6203
b6016b76
MC
/* Called with rtnl_lock */
static int
bnx2_open(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	netif_carrier_off(dev);

	/* Bring the chip to full power before touching it. */
	bnx2_set_power_state(bp, PCI_D0);
	bnx2_disable_int(bp);

	bnx2_setup_int_mode(bp, disable_msi);
	bnx2_init_napi(bp);
	bnx2_napi_enable(bp);
	rc = bnx2_alloc_mem(bp);
	if (rc)
		goto open_err;

	rc = bnx2_request_irq(bp);
	if (rc)
		goto open_err;

	rc = bnx2_init_nic(bp, 1);
	if (rc)
		goto open_err;

	mod_timer(&bp->timer, jiffies + bp->current_interval);

	atomic_set(&bp->intr_sem, 0);

	/* Clear the software stats accumulator used across chip resets. */
	memset(bp->temp_stats_blk, 0, sizeof(struct statistics_block));

	bnx2_enable_int(bp);

	if (bp->flags & BNX2_FLAG_USING_MSI) {
		/* Test MSI to make sure it is working
		 * If MSI test fails, go back to INTx mode
		 */
		if (bnx2_test_intr(bp) != 0) {
			netdev_warn(bp->dev, "No interrupt was generated using MSI, switching to INTx mode. Please report this failure to the PCI maintainer and include system chipset information.\n");

			bnx2_disable_int(bp);
			bnx2_free_irq(bp);

			/* dis_msi=1: fall back to legacy INTx */
			bnx2_setup_int_mode(bp, 1);

			rc = bnx2_init_nic(bp, 0);

			if (!rc)
				rc = bnx2_request_irq(bp);

			if (rc) {
				del_timer_sync(&bp->timer);
				goto open_err;
			}
			bnx2_enable_int(bp);
		}
	}
	if (bp->flags & BNX2_FLAG_USING_MSI)
		netdev_info(dev, "using MSI\n");
	else if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_info(dev, "using MSIX\n");

	netif_tx_start_all_queues(dev);

	return 0;

open_err:
	/* Unwind everything allocated above; safe to call on partial setup. */
	bnx2_napi_disable(bp);
	bnx2_free_skbs(bp);
	bnx2_free_irq(bp);
	bnx2_free_mem(bp);
	return rc;
}
6279
/* Workqueue handler scheduled by bnx2_tx_timeout(): stop traffic,
 * re-initialize the NIC, and restart.  Takes rtnl_lock itself because
 * it runs from process context, not under the caller's lock.
 */
static void
bnx2_reset_task(struct work_struct *work)
{
	struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

	rtnl_lock();
	/* Device may have been closed between scheduling and execution. */
	if (!netif_running(bp->dev)) {
		rtnl_unlock();
		return;
	}

	bnx2_netif_stop(bp, true);

	bnx2_init_nic(bp, 1);

	/* intr_sem=1 pairs with the decrement done when interrupts resume. */
	atomic_set(&bp->intr_sem, 1);
	bnx2_netif_start(bp, true);
	rtnl_unlock();
}
6299
20175c57
MC
/* Dump a small set of diagnostic registers to the kernel log; called
 * from bnx2_tx_timeout() to aid debugging of stalled hardware.
 */
static void
bnx2_dump_state(struct bnx2 *bp)
{
	struct net_device *dev = bp->dev;

	netdev_err(dev, "DEBUG: intr_sem[%x]\n", atomic_read(&bp->intr_sem));
	netdev_err(dev, "DEBUG: EMAC_TX_STATUS[%08x] RPM_MGMT_PKT_CTRL[%08x]\n",
		   REG_RD(bp, BNX2_EMAC_TX_STATUS),
		   REG_RD(bp, BNX2_RPM_MGMT_PKT_CTRL));
	/* Management CPU state is only reachable via indirect reads. */
	netdev_err(dev, "DEBUG: MCP_STATE_P0[%08x] MCP_STATE_P1[%08x]\n",
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P0),
		   bnx2_reg_rd_ind(bp, BNX2_MCP_STATE_P1));
	netdev_err(dev, "DEBUG: HC_STATS_INTERRUPT_STATUS[%08x]\n",
		   REG_RD(bp, BNX2_HC_STATS_INTERRUPT_STATUS));
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		netdev_err(dev, "DEBUG: PBA[%08x]\n",
			   REG_RD(bp, BNX2_PCI_GRC_WINDOW3_BASE));
}
6318
b6016b76
MC
/* netdev tx_timeout hook: log diagnostic state and defer the actual
 * reset to the workqueue (we cannot reset from this context).
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_dump_state(bp);

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}
6329
#ifdef BCM_VLAN
/* Called with rtnl_lock */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Quiesce traffic before swapping the VLAN group pointer. */
	if (netif_running(dev))
		bnx2_netif_stop(bp, false);

	bp->vlgrp = vlgrp;

	if (!netif_running(dev))
		return;

	bnx2_set_rx_mode(dev);
	/* Tell firmware to keep VLAN config updated if it supports it. */
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

	bnx2_netif_start(bp, false);
}
#endif
6352
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 *
 * Maps the skb head and all fragments for DMA, fills one tx BD per
 * mapped region, then rings the doorbell.  On a fragment mapping
 * failure, everything mapped so far is unwound and the skb is dropped
 * (NETDEV_TX_OK is still returned so the stack does not requeue it).
 */
static netdev_tx_t
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	struct tx_bd *txbd;
	struct sw_tx_bd *tx_buf;
	u32 len, vlan_tag_flags, last_frag, mss;
	u16 prod, ring_prod;
	int i;
	struct bnx2_napi *bnapi;
	struct bnx2_tx_ring_info *txr;
	struct netdev_queue *txq;

	/* Determine which tx ring we will be placed on */
	i = skb_get_queue_mapping(skb);
	bnapi = &bp->bnx2_napi[i];
	txr = &bnapi->tx_ring;
	txq = netdev_get_tx_queue(dev, i);

	/* Need one BD for the head plus one per fragment. */
	if (unlikely(bnx2_tx_avail(bp, txr) <
	    (skb_shinfo(skb)->nr_frags + 1))) {
		netif_tx_stop_queue(txq);
		netdev_err(dev, "BUG! Tx ring full when queue awake!\n");

		return NETDEV_TX_BUSY;
	}
	len = skb_headlen(skb);
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);

	vlan_tag_flags = 0;
	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
	}

#ifdef BCM_VLAN
	if (bp->vlgrp && vlan_tx_tag_present(skb)) {
		vlan_tag_flags |=
			(TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
	}
#endif
	/* LSO setup: encode TCP option length (and for IPv6, the TCP
	 * header offset) into the BD flags/mss fields.
	 */
	if ((mss = skb_shinfo(skb)->gso_size)) {
		u32 tcp_opt_len;
		struct iphdr *iph;

		vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

		tcp_opt_len = tcp_optlen(skb);

		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
			u32 tcp_off = skb_transport_offset(skb) -
				      sizeof(struct ipv6hdr) - ETH_HLEN;

			vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
					  TX_BD_FLAGS_SW_FLAGS;
			if (likely(tcp_off == 0))
				vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
			else {
				/* tcp_off is split across three BD fields */
				tcp_off >>= 3;
				vlan_tag_flags |= ((tcp_off & 0x3) <<
						   TX_BD_FLAGS_TCP6_OFF0_SHL) |
						  ((tcp_off & 0x10) <<
						   TX_BD_FLAGS_TCP6_OFF4_SHL);
				mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
			}
		} else {
			iph = ip_hdr(skb);
			if (tcp_opt_len || (iph->ihl > 5)) {
				vlan_tag_flags |= ((iph->ihl - 5) +
						   (tcp_opt_len >> 2)) << 8;
			}
		}
	} else
		mss = 0;

	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = skb;
	dma_unmap_addr_set(tx_buf, mapping, mapping);

	txbd = &txr->tx_desc_ring[ring_prod];

	txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
	txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
	txbd->tx_bd_mss_nbytes = len | (mss << 16);
	txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

	last_frag = skb_shinfo(skb)->nr_frags;
	tx_buf->nr_frags = last_frag;
	tx_buf->is_gso = skb_is_gso(skb);

	for (i = 0; i < last_frag; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		txbd = &txr->tx_desc_ring[ring_prod];

		len = frag->size;
		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       len, PCI_DMA_TODEVICE);
		if (pci_dma_mapping_error(bp->pdev, mapping))
			goto dma_error;
		dma_unmap_addr_set(&txr->tx_buf_ring[ring_prod], mapping,
				   mapping);

		txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
		txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
		txbd->tx_bd_mss_nbytes = len | (mss << 16);
		txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

	}
	txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

	prod = NEXT_TX_BD(prod);
	txr->tx_prod_bseq += skb->len;

	/* Ring the doorbell: hardware picks up BDs after these writes. */
	REG_WR16(bp, txr->tx_bidx_addr, prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	mmiowb();

	txr->tx_prod = prod;

	if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
		netif_tx_stop_queue(txq);
		/* Re-check after stopping to close the race with tx_int. */
		if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
			netif_tx_wake_queue(txq);
	}

	return NETDEV_TX_OK;
dma_error:
	/* save value of frag that failed */
	last_frag = i;

	/* start back at beginning and unmap skb */
	prod = txr->tx_prod;
	ring_prod = TX_RING_IDX(prod);
	tx_buf = &txr->tx_buf_ring[ring_prod];
	tx_buf->skb = NULL;
	pci_unmap_single(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			 skb_headlen(skb), PCI_DMA_TODEVICE);

	/* unmap remaining mapped pages */
	for (i = 0; i < last_frag; i++) {
		prod = NEXT_TX_BD(prod);
		ring_prod = TX_RING_IDX(prod);
		tx_buf = &txr->tx_buf_ring[ring_prod];
		pci_unmap_page(bp->pdev, dma_unmap_addr(tx_buf, mapping),
			       skb_shinfo(skb)->frags[i].size,
			       PCI_DMA_TODEVICE);
	}

	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
6519
/* Called with rtnl_lock */
static int
bnx2_close(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	/* Ensure a pending reset_task cannot run during/after teardown. */
	cancel_work_sync(&bp->reset_task);

	bnx2_disable_int_sync(bp);
	bnx2_napi_disable(bp);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_irq(bp);
	bnx2_free_skbs(bp);
	bnx2_free_mem(bp);
	bp->link_up = 0;
	netif_carrier_off(bp->dev);
	/* Drop to D3hot to save power while the interface is down. */
	bnx2_set_power_state(bp, PCI_D3hot);
	return 0;
}
6540
354fcd77
MC
/* Accumulate the current hardware statistics block into
 * temp_stats_blk.  Called before a chip reset (see
 * bnx2_change_ring_size()), which would otherwise erase the counters.
 */
static void
bnx2_save_stats(struct bnx2 *bp)
{
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u32 *temp_stats = (u32 *) bp->temp_stats_blk;
	int i;

	/* The 1st 10 counters are 64-bit counters */
	for (i = 0; i < 20; i += 2) {
		u32 hi;
		u64 lo;

		/* Each 64-bit counter is stored as a hi/lo u32 pair;
		 * add the halves separately and propagate the carry
		 * from lo into hi by hand.
		 */
		hi = temp_stats[i] + hw_stats[i];
		lo = (u64) temp_stats[i + 1] + (u64) hw_stats[i + 1];
		if (lo > 0xffffffff)
			hi++;
		temp_stats[i] = hi;
		temp_stats[i + 1] = lo & 0xffffffff;
	}

	/* The remaining counters are plain 32-bit values. */
	for ( ; i < sizeof(struct statistics_block) / 4; i++)
		temp_stats[i] += hw_stats[i];
}
6564
/* Helpers for summing a hardware counter with its saved value in
 * temp_stats_blk (see bnx2_save_stats()).  64-bit counters are stored
 * as _hi/_lo u32 pairs; on 32-bit kernels only the low half is
 * reported since net_device_stats fields are unsigned long.
 *
 * Each replacement list is fully parenthesized so the macros expand
 * safely next to higher-precedence operators (CERT PRE02-C); the
 * original bodies were bare `a + b` expressions.
 */
#define GET_64BIT_NET_STATS64(ctr)				\
	((unsigned long) ((unsigned long) (ctr##_hi) << 32) +	\
	 (unsigned long) (ctr##_lo))

#define GET_64BIT_NET_STATS32(ctr)				\
	(ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_64BIT_NET_STATS(ctr)				\
	(GET_64BIT_NET_STATS64(bp->stats_blk->ctr) +		\
	 GET_64BIT_NET_STATS64(bp->temp_stats_blk->ctr))
#else
#define GET_64BIT_NET_STATS(ctr)				\
	(GET_64BIT_NET_STATS32(bp->stats_blk->ctr) +		\
	 GET_64BIT_NET_STATS32(bp->temp_stats_blk->ctr))
#endif

#define GET_32BIT_NET_STATS(ctr)				\
	(unsigned long) (bp->stats_blk->ctr +			\
			 bp->temp_stats_blk->ctr)
a4743058 6585
b6016b76
MC
6586static struct net_device_stats *
6587bnx2_get_stats(struct net_device *dev)
6588{
972ec0d4 6589 struct bnx2 *bp = netdev_priv(dev);
d8e8034d 6590 struct net_device_stats *net_stats = &dev->stats;
b6016b76
MC
6591
6592 if (bp->stats_blk == NULL) {
6593 return net_stats;
6594 }
6595 net_stats->rx_packets =
a4743058
MC
6596 GET_64BIT_NET_STATS(stat_IfHCInUcastPkts) +
6597 GET_64BIT_NET_STATS(stat_IfHCInMulticastPkts) +
6598 GET_64BIT_NET_STATS(stat_IfHCInBroadcastPkts);
b6016b76
MC
6599
6600 net_stats->tx_packets =
a4743058
MC
6601 GET_64BIT_NET_STATS(stat_IfHCOutUcastPkts) +
6602 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts) +
6603 GET_64BIT_NET_STATS(stat_IfHCOutBroadcastPkts);
b6016b76
MC
6604
6605 net_stats->rx_bytes =
a4743058 6606 GET_64BIT_NET_STATS(stat_IfHCInOctets);
b6016b76
MC
6607
6608 net_stats->tx_bytes =
a4743058 6609 GET_64BIT_NET_STATS(stat_IfHCOutOctets);
b6016b76 6610
6aa20a22 6611 net_stats->multicast =
a4743058 6612 GET_64BIT_NET_STATS(stat_IfHCOutMulticastPkts);
b6016b76 6613
6aa20a22 6614 net_stats->collisions =
a4743058 6615 GET_32BIT_NET_STATS(stat_EtherStatsCollisions);
b6016b76 6616
6aa20a22 6617 net_stats->rx_length_errors =
a4743058
MC
6618 GET_32BIT_NET_STATS(stat_EtherStatsUndersizePkts) +
6619 GET_32BIT_NET_STATS(stat_EtherStatsOverrsizePkts);
b6016b76 6620
6aa20a22 6621 net_stats->rx_over_errors =
a4743058
MC
6622 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6623 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards);
b6016b76 6624
6aa20a22 6625 net_stats->rx_frame_errors =
a4743058 6626 GET_32BIT_NET_STATS(stat_Dot3StatsAlignmentErrors);
b6016b76 6627
6aa20a22 6628 net_stats->rx_crc_errors =
a4743058 6629 GET_32BIT_NET_STATS(stat_Dot3StatsFCSErrors);
b6016b76
MC
6630
6631 net_stats->rx_errors = net_stats->rx_length_errors +
6632 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6633 net_stats->rx_crc_errors;
6634
6635 net_stats->tx_aborted_errors =
a4743058
MC
6636 GET_32BIT_NET_STATS(stat_Dot3StatsExcessiveCollisions) +
6637 GET_32BIT_NET_STATS(stat_Dot3StatsLateCollisions);
b6016b76 6638
5b0c76ad
MC
6639 if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6640 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76
MC
6641 net_stats->tx_carrier_errors = 0;
6642 else {
6643 net_stats->tx_carrier_errors =
a4743058 6644 GET_32BIT_NET_STATS(stat_Dot3StatsCarrierSenseErrors);
b6016b76
MC
6645 }
6646
6647 net_stats->tx_errors =
a4743058 6648 GET_32BIT_NET_STATS(stat_emac_tx_stat_dot3statsinternalmactransmiterrors) +
b6016b76
MC
6649 net_stats->tx_aborted_errors +
6650 net_stats->tx_carrier_errors;
6651
cea94db9 6652 net_stats->rx_missed_errors =
a4743058
MC
6653 GET_32BIT_NET_STATS(stat_IfInFTQDiscards) +
6654 GET_32BIT_NET_STATS(stat_IfInMBUFDiscards) +
6655 GET_32BIT_NET_STATS(stat_FwRxDrop);
cea94db9 6656
b6016b76
MC
6657 return net_stats;
6658}
6659
/* All ethtool functions called with rtnl_lock */

/* ethtool get_settings: report supported/advertised modes, current
 * port type, and (if the link is up) the negotiated speed/duplex.
 * Always returns 0.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	int support_serdes = 0, support_copper = 0;

	cmd->supported = SUPPORTED_Autoneg;
	/* Remote-PHY-capable devices can switch ports, so both apply. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		support_serdes = 1;
		support_copper = 1;
	} else if (bp->phy_port == PORT_FIBRE)
		support_serdes = 1;
	else
		support_copper = 1;

	if (support_serdes) {
		cmd->supported |= SUPPORTED_1000baseT_Full |
			SUPPORTED_FIBRE;
		if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
			cmd->supported |= SUPPORTED_2500baseX_Full;

	}
	if (support_copper) {
		cmd->supported |= SUPPORTED_10baseT_Half |
			SUPPORTED_10baseT_Full |
			SUPPORTED_100baseT_Half |
			SUPPORTED_100baseT_Full |
			SUPPORTED_1000baseT_Full |
			SUPPORTED_TP;

	}

	/* phy_lock protects the link-state fields read below. */
	spin_lock_bh(&bp->phy_lock);
	cmd->port = bp->phy_port;
	cmd->advertising = bp->advertising;

	if (bp->autoneg & AUTONEG_SPEED) {
		cmd->autoneg = AUTONEG_ENABLE;
	}
	else {
		cmd->autoneg = AUTONEG_DISABLE;
	}

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->line_speed;
		cmd->duplex = bp->duplex;
	}
	else {
		/* -1 signals "unknown" to ethtool when there is no link. */
		cmd->speed = -1;
		cmd->duplex = -1;
	}
	spin_unlock_bh(&bp->phy_lock);

	cmd->transceiver = XCVR_INTERNAL;
	cmd->phy_address = bp->phy_addr;

	return 0;
}
6aa20a22 6720
b6016b76
MC
/* ethtool set_settings: validate and apply link configuration
 * (port, autoneg, speed, duplex, advertised modes).
 *
 * Works on local copies of the settings and commits them to bp only
 * after all validation has passed, so a rejected request leaves the
 * previous configuration intact.  Returns 0 or -EINVAL.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2 *bp = netdev_priv(dev);
	u8 autoneg = bp->autoneg;
	u8 req_duplex = bp->req_duplex;
	u16 req_line_speed = bp->req_line_speed;
	u32 advertising = bp->advertising;
	int err = -EINVAL;

	spin_lock_bh(&bp->phy_lock);

	if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
		goto err_out_unlock;

	/* Port can only change on remote-PHY-capable devices. */
	if (cmd->port != bp->phy_port &&
	    !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
		goto err_out_unlock;

	/* If device is down, we can store the settings only if the user
	 * is setting the currently active port.
	 */
	if (!netif_running(dev) && cmd->port != bp->phy_port)
		goto err_out_unlock;

	if (cmd->autoneg == AUTONEG_ENABLE) {
		autoneg |= AUTONEG_SPEED;

		advertising = cmd->advertising;
		/* Mask the advertisement to what the chosen port supports;
		 * fall back to "everything" if nothing valid remains.
		 */
		if (cmd->port == PORT_TP) {
			advertising &= ETHTOOL_ALL_COPPER_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_COPPER_SPEED;
		} else {
			advertising &= ETHTOOL_ALL_FIBRE_SPEED;
			if (!advertising)
				advertising = ETHTOOL_ALL_FIBRE_SPEED;
		}
		advertising |= ADVERTISED_Autoneg;
	}
	else {
		/* Forced speed: fibre allows only 1000/2500 full duplex,
		 * and 2500 only on 2.5G-capable PHYs; copper forbids
		 * forcing gigabit-and-above speeds.
		 */
		if (cmd->port == PORT_FIBRE) {
			if ((cmd->speed != SPEED_1000 &&
			     cmd->speed != SPEED_2500) ||
			    (cmd->duplex != DUPLEX_FULL))
				goto err_out_unlock;

			if (cmd->speed == SPEED_2500 &&
			    !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
				goto err_out_unlock;
		}
		else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
			goto err_out_unlock;

		autoneg &= ~AUTONEG_SPEED;
		req_line_speed = cmd->speed;
		req_duplex = cmd->duplex;
		advertising = 0;
	}

	bp->autoneg = autoneg;
	bp->advertising = advertising;
	bp->req_line_speed = req_line_speed;
	bp->req_duplex = req_duplex;

	err = 0;
	/* If device is down, the new settings will be picked up when it is
	 * brought up.
	 */
	if (netif_running(dev))
		err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
	spin_unlock_bh(&bp->phy_lock);

	return err;
}
6798
6799static void
6800bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6801{
972ec0d4 6802 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6803
6804 strcpy(info->driver, DRV_MODULE_NAME);
6805 strcpy(info->version, DRV_MODULE_VERSION);
6806 strcpy(info->bus_info, pci_name(bp->pdev));
58fc2ea4 6807 strcpy(info->fw_version, bp->fw_version);
b6016b76
MC
6808}
6809
244ac4f4
MC
/* Size in bytes of the register dump produced by bnx2_get_regs(). */
#define BNX2_REGDUMP_LEN		(32 * 1024)

/* ethtool get_regs_len: the dump size is fixed for this hardware. */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6817
6818static void
6819bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
6820{
6821 u32 *p = _p, i, offset;
6822 u8 *orig_p = _p;
6823 struct bnx2 *bp = netdev_priv(dev);
6824 u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
6825 0x0800, 0x0880, 0x0c00, 0x0c10,
6826 0x0c30, 0x0d08, 0x1000, 0x101c,
6827 0x1040, 0x1048, 0x1080, 0x10a4,
6828 0x1400, 0x1490, 0x1498, 0x14f0,
6829 0x1500, 0x155c, 0x1580, 0x15dc,
6830 0x1600, 0x1658, 0x1680, 0x16d8,
6831 0x1800, 0x1820, 0x1840, 0x1854,
6832 0x1880, 0x1894, 0x1900, 0x1984,
6833 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
6834 0x1c80, 0x1c94, 0x1d00, 0x1d84,
6835 0x2000, 0x2030, 0x23c0, 0x2400,
6836 0x2800, 0x2820, 0x2830, 0x2850,
6837 0x2b40, 0x2c10, 0x2fc0, 0x3058,
6838 0x3c00, 0x3c94, 0x4000, 0x4010,
6839 0x4080, 0x4090, 0x43c0, 0x4458,
6840 0x4c00, 0x4c18, 0x4c40, 0x4c54,
6841 0x4fc0, 0x5010, 0x53c0, 0x5444,
6842 0x5c00, 0x5c18, 0x5c80, 0x5c90,
6843 0x5fc0, 0x6000, 0x6400, 0x6428,
6844 0x6800, 0x6848, 0x684c, 0x6860,
6845 0x6888, 0x6910, 0x8000 };
6846
6847 regs->version = 0;
6848
6849 memset(p, 0, BNX2_REGDUMP_LEN);
6850
6851 if (!netif_running(bp->dev))
6852 return;
6853
6854 i = 0;
6855 offset = reg_boundaries[0];
6856 p += offset;
6857 while (offset < BNX2_REGDUMP_LEN) {
6858 *p++ = REG_RD(bp, offset);
6859 offset += 4;
6860 if (offset == reg_boundaries[i + 1]) {
6861 offset = reg_boundaries[i + 2];
6862 p = (u32 *) (orig_p + offset);
6863 i += 2;
6864 }
6865 }
6866}
6867
b6016b76
MC
6868static void
6869bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6870{
972ec0d4 6871 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6872
f86e82fb 6873 if (bp->flags & BNX2_FLAG_NO_WOL) {
b6016b76
MC
6874 wol->supported = 0;
6875 wol->wolopts = 0;
6876 }
6877 else {
6878 wol->supported = WAKE_MAGIC;
6879 if (bp->wol)
6880 wol->wolopts = WAKE_MAGIC;
6881 else
6882 wol->wolopts = 0;
6883 }
6884 memset(&wol->sopass, 0, sizeof(wol->sopass));
6885}
6886
6887static int
6888bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6889{
972ec0d4 6890 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
6891
6892 if (wol->wolopts & ~WAKE_MAGIC)
6893 return -EINVAL;
6894
6895 if (wol->wolopts & WAKE_MAGIC) {
f86e82fb 6896 if (bp->flags & BNX2_FLAG_NO_WOL)
b6016b76
MC
6897 return -EINVAL;
6898
6899 bp->wol = 1;
6900 }
6901 else {
6902 bp->wol = 0;
6903 }
6904 return 0;
6905}
6906
/* ethtool nway_reset: restart link autonegotiation.
 * Returns -EAGAIN if the device is down, -EINVAL if autoneg is not
 * enabled, or the result of the remote-PHY restart when applicable.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY devices delegate the restart to the firmware PHY. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* Drop the lock while sleeping; msleep() cannot be
		 * called with a BH spinlock held.
		 */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout handled by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6952
7959ea25
ON
6953static u32
6954bnx2_get_link(struct net_device *dev)
6955{
6956 struct bnx2 *bp = netdev_priv(dev);
6957
6958 return bp->link_up;
6959}
6960
b6016b76
MC
6961static int
6962bnx2_get_eeprom_len(struct net_device *dev)
6963{
972ec0d4 6964 struct bnx2 *bp = netdev_priv(dev);
b6016b76 6965
1122db71 6966 if (bp->flash_info == NULL)
b6016b76
MC
6967 return 0;
6968
1122db71 6969 return (int) bp->flash_size;
b6016b76
MC
6970}
6971
/* ethtool get_eeprom: read eeprom->len bytes of NVRAM starting at
 * eeprom->offset into eebuf.  Requires the device to be up (NVRAM
 * access needs the chip powered); returns -EAGAIN otherwise.
 */
static int
bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
6988
/* ethtool set_eeprom: write eeprom->len bytes from eebuf to NVRAM at
 * eeprom->offset.  Requires the device to be up; returns -EAGAIN
 * otherwise.
 */
static int
bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
		u8 *eebuf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	/* parameters already validated in ethtool_set_eeprom */

	rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}
7005
7006static int
7007bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
7008{
972ec0d4 7009 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7010
7011 memset(coal, 0, sizeof(struct ethtool_coalesce));
7012
7013 coal->rx_coalesce_usecs = bp->rx_ticks;
7014 coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
7015 coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
7016 coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
7017
7018 coal->tx_coalesce_usecs = bp->tx_ticks;
7019 coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
7020 coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
7021 coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
7022
7023 coal->stats_block_coalesce_usecs = bp->stats_ticks;
7024
7025 return 0;
7026}
7027
/* ethtool set_coalesce: clamp the requested coalescing parameters to
 * the host-coalescing block's field widths (tick values are clamped
 * to 0x3ff, frame counts to 0xff) and restart the NIC so the new
 * values get programmed.  Always returns 0.
 */
static int
bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;

	bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
	if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;

	bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
	if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;

	bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
	if (bp->rx_quick_cons_trip_int > 0xff)
		bp->rx_quick_cons_trip_int = 0xff;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;

	bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
	if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;

	bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
	if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;

	bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
	if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
		0xff;

	bp->stats_ticks = coal->stats_block_coalesce_usecs;
	/* Chips with broken stats coalescing only support off or 1 s. */
	if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
		if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
			bp->stats_ticks = USEC_PER_SEC;
	}
	if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
		bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
	bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;

	/* Restart so the hardware picks up the new coalescing values. */
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp, true);
		bnx2_init_nic(bp, 0);
		bnx2_netif_start(bp, true);
	}

	return 0;
}
7076
7077static void
7078bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7079{
972ec0d4 7080 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7081
13daffa2 7082 ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
b6016b76 7083 ering->rx_mini_max_pending = 0;
47bf4246 7084 ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
b6016b76
MC
7085
7086 ering->rx_pending = bp->rx_ring_size;
7087 ering->rx_mini_pending = 0;
47bf4246 7088 ering->rx_jumbo_pending = bp->rx_pg_ring_size;
b6016b76
MC
7089
7090 ering->tx_max_pending = MAX_TX_DESC_CNT;
7091 ering->tx_pending = bp->tx_ring_size;
7092}
7093
/* Resize the rx and tx rings.  If the device is running, the chip is
 * reset (statistics are saved first since the reset clears them), the
 * rings are reallocated, and the NIC is restarted.  Returns 0 or a
 * negative errno; on failure the device is closed via dev_close().
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		/* Reset will erase chipset stats; save them */
		bnx2_save_stats(bp);

		bnx2_netif_stop(bp, true);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* NAPI must be enabled for dev_close() to be
			 * able to disable it again during teardown.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
#ifdef BCM_CNIC
		mutex_lock(&bp->cnic_lock);
		/* Let cnic know about the new status block. */
		if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD)
			bnx2_setup_cnic_irq_info(bp);
		mutex_unlock(&bp->cnic_lock);
#endif
		bnx2_netif_start(bp, true);
	}
	return 0;
}
7133
5d5d0015
MC
7134static int
7135bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7136{
7137 struct bnx2 *bp = netdev_priv(dev);
7138 int rc;
7139
7140 if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7141 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7142 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7143
7144 return -EINVAL;
7145 }
7146 rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7147 return rc;
7148}
7149
b6016b76
MC
7150static void
7151bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7152{
972ec0d4 7153 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7154
7155 epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7156 epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7157 epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7158}
7159
/* ethtool set_pauseparam: store the requested rx/tx pause and
 * flow-control autoneg settings, then reprogram the PHY if the device
 * is up.  Always returns 0.
 */
static int
bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->req_flow_ctrl = 0;
	if (epause->rx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_RX;
	if (epause->tx_pause)
		bp->req_flow_ctrl |= FLOW_CTRL_TX;

	if (epause->autoneg) {
		bp->autoneg |= AUTONEG_FLOW_CTRL;
	}
	else {
		bp->autoneg &= ~AUTONEG_FLOW_CTRL;
	}

	/* Apply immediately only when the interface is running. */
	if (netif_running(dev)) {
		spin_lock_bh(&bp->phy_lock);
		bnx2_setup_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
	}

	return 0;
}
7186
7187static u32
7188bnx2_get_rx_csum(struct net_device *dev)
7189{
972ec0d4 7190 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7191
7192 return bp->rx_csum;
7193}
7194
7195static int
7196bnx2_set_rx_csum(struct net_device *dev, u32 data)
7197{
972ec0d4 7198 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7199
7200 bp->rx_csum = data;
7201 return 0;
7202}
7203
b11d6213
MC
/* ethtool set_tso: toggle TCP segmentation offload feature flags.
 * TSO6 is only enabled on the 5709, which supports IPv6 LSO.
 * Always returns 0.
 */
static int
bnx2_set_tso(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	if (data) {
		dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			dev->features |= NETIF_F_TSO6;
	} else
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
				   NETIF_F_TSO_ECN);
	return 0;
}
7218
/* ethtool statistics names.  The order here must match the counter
 * offsets in bnx2_stats_offset_arr below — presumably entry i of each
 * array describes the same counter; verify when adding entries.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7270
790dab2f
MC
7271#define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7272 sizeof(bnx2_stats_str_arr[0]))
7273
b6016b76
MC
7274#define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7275
f71e1309 7276static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
b6016b76
MC
7277 STATS_OFFSET32(stat_IfHCInOctets_hi),
7278 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
7279 STATS_OFFSET32(stat_IfHCOutOctets_hi),
7280 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
7281 STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
7282 STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
7283 STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
7284 STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
7285 STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
7286 STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
7287 STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
6aa20a22
JG
7288 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
7289 STATS_OFFSET32(stat_Dot3StatsFCSErrors),
7290 STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
7291 STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
7292 STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
7293 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
7294 STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
7295 STATS_OFFSET32(stat_Dot3StatsLateCollisions),
7296 STATS_OFFSET32(stat_EtherStatsCollisions),
7297 STATS_OFFSET32(stat_EtherStatsFragments),
7298 STATS_OFFSET32(stat_EtherStatsJabbers),
7299 STATS_OFFSET32(stat_EtherStatsUndersizePkts),
7300 STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
7301 STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
7302 STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
7303 STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
7304 STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
7305 STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
7306 STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
7307 STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
7308 STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
7309 STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
7310 STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
7311 STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
7312 STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
7313 STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
7314 STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
7315 STATS_OFFSET32(stat_XonPauseFramesReceived),
7316 STATS_OFFSET32(stat_XoffPauseFramesReceived),
7317 STATS_OFFSET32(stat_OutXonSent),
7318 STATS_OFFSET32(stat_OutXoffSent),
7319 STATS_OFFSET32(stat_MacControlFramesReceived),
7320 STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
790dab2f 7321 STATS_OFFSET32(stat_IfInFTQDiscards),
6aa20a22 7322 STATS_OFFSET32(stat_IfInMBUFDiscards),
cea94db9 7323 STATS_OFFSET32(stat_FwRxDrop),
b6016b76
MC
7324};
7325
7326/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
7327 * skipped because of errata.
6aa20a22 7328 */
14ab9b86 7329static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
b6016b76
MC
7330 8,0,8,8,8,8,8,8,8,8,
7331 4,0,4,4,4,4,4,4,4,4,
7332 4,4,4,4,4,4,4,4,4,4,
7333 4,4,4,4,4,4,4,4,4,4,
790dab2f 7334 4,4,4,4,4,4,4,
b6016b76
MC
7335};
7336
5b0c76ad
MC
7337static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
7338 8,0,8,8,8,8,8,8,8,8,
7339 4,4,4,4,4,4,4,4,4,4,
7340 4,4,4,4,4,4,4,4,4,4,
7341 4,4,4,4,4,4,4,4,4,4,
790dab2f 7342 4,4,4,4,4,4,4,
5b0c76ad
MC
7343};
7344
b6016b76
MC
7345#define BNX2_NUM_TESTS 6
7346
14ab9b86 7347static struct {
b6016b76
MC
7348 char string[ETH_GSTRING_LEN];
7349} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
7350 { "register_test (offline)" },
7351 { "memory_test (offline)" },
7352 { "loopback_test (offline)" },
7353 { "nvram_test (online)" },
7354 { "interrupt_test (online)" },
7355 { "link_test (online)" },
7356};
7357
7358static int
b9f2c044 7359bnx2_get_sset_count(struct net_device *dev, int sset)
b6016b76 7360{
b9f2c044
JG
7361 switch (sset) {
7362 case ETH_SS_TEST:
7363 return BNX2_NUM_TESTS;
7364 case ETH_SS_STATS:
7365 return BNX2_NUM_STATS;
7366 default:
7367 return -EOPNOTSUPP;
7368 }
b6016b76
MC
7369}
7370
7371static void
7372bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
7373{
972ec0d4 7374 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7375
9f52b564
MC
7376 bnx2_set_power_state(bp, PCI_D0);
7377
b6016b76
MC
7378 memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
7379 if (etest->flags & ETH_TEST_FL_OFFLINE) {
80be4434
MC
7380 int i;
7381
212f9934 7382 bnx2_netif_stop(bp, true);
b6016b76
MC
7383 bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
7384 bnx2_free_skbs(bp);
7385
7386 if (bnx2_test_registers(bp) != 0) {
7387 buf[0] = 1;
7388 etest->flags |= ETH_TEST_FL_FAILED;
7389 }
7390 if (bnx2_test_memory(bp) != 0) {
7391 buf[1] = 1;
7392 etest->flags |= ETH_TEST_FL_FAILED;
7393 }
bc5a0690 7394 if ((buf[2] = bnx2_test_loopback(bp)) != 0)
b6016b76 7395 etest->flags |= ETH_TEST_FL_FAILED;
b6016b76 7396
9f52b564
MC
7397 if (!netif_running(bp->dev))
7398 bnx2_shutdown_chip(bp);
b6016b76 7399 else {
9a120bc5 7400 bnx2_init_nic(bp, 1);
212f9934 7401 bnx2_netif_start(bp, true);
b6016b76
MC
7402 }
7403
7404 /* wait for link up */
80be4434
MC
7405 for (i = 0; i < 7; i++) {
7406 if (bp->link_up)
7407 break;
7408 msleep_interruptible(1000);
7409 }
b6016b76
MC
7410 }
7411
7412 if (bnx2_test_nvram(bp) != 0) {
7413 buf[3] = 1;
7414 etest->flags |= ETH_TEST_FL_FAILED;
7415 }
7416 if (bnx2_test_intr(bp) != 0) {
7417 buf[4] = 1;
7418 etest->flags |= ETH_TEST_FL_FAILED;
7419 }
7420
7421 if (bnx2_test_link(bp) != 0) {
7422 buf[5] = 1;
7423 etest->flags |= ETH_TEST_FL_FAILED;
7424
7425 }
9f52b564
MC
7426 if (!netif_running(bp->dev))
7427 bnx2_set_power_state(bp, PCI_D3hot);
b6016b76
MC
7428}
7429
7430static void
7431bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7432{
7433 switch (stringset) {
7434 case ETH_SS_STATS:
7435 memcpy(buf, bnx2_stats_str_arr,
7436 sizeof(bnx2_stats_str_arr));
7437 break;
7438 case ETH_SS_TEST:
7439 memcpy(buf, bnx2_tests_str_arr,
7440 sizeof(bnx2_tests_str_arr));
7441 break;
7442 }
7443}
7444
b6016b76
MC
7445static void
7446bnx2_get_ethtool_stats(struct net_device *dev,
7447 struct ethtool_stats *stats, u64 *buf)
7448{
972ec0d4 7449 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7450 int i;
7451 u32 *hw_stats = (u32 *) bp->stats_blk;
354fcd77 7452 u32 *temp_stats = (u32 *) bp->temp_stats_blk;
14ab9b86 7453 u8 *stats_len_arr = NULL;
b6016b76
MC
7454
7455 if (hw_stats == NULL) {
7456 memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
7457 return;
7458 }
7459
5b0c76ad
MC
7460 if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
7461 (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
7462 (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
7463 (CHIP_ID(bp) == CHIP_ID_5708_A0))
b6016b76 7464 stats_len_arr = bnx2_5706_stats_len_arr;
5b0c76ad
MC
7465 else
7466 stats_len_arr = bnx2_5708_stats_len_arr;
b6016b76
MC
7467
7468 for (i = 0; i < BNX2_NUM_STATS; i++) {
354fcd77
MC
7469 unsigned long offset;
7470
b6016b76
MC
7471 if (stats_len_arr[i] == 0) {
7472 /* skip this counter */
7473 buf[i] = 0;
7474 continue;
7475 }
354fcd77
MC
7476
7477 offset = bnx2_stats_offset_arr[i];
b6016b76
MC
7478 if (stats_len_arr[i] == 4) {
7479 /* 4-byte counter */
354fcd77
MC
7480 buf[i] = (u64) *(hw_stats + offset) +
7481 *(temp_stats + offset);
b6016b76
MC
7482 continue;
7483 }
7484 /* 8-byte counter */
354fcd77
MC
7485 buf[i] = (((u64) *(hw_stats + offset)) << 32) +
7486 *(hw_stats + offset + 1) +
7487 (((u64) *(temp_stats + offset)) << 32) +
7488 *(temp_stats + offset + 1);
b6016b76
MC
7489 }
7490}
7491
7492static int
7493bnx2_phys_id(struct net_device *dev, u32 data)
7494{
972ec0d4 7495 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7496 int i;
7497 u32 save;
7498
9f52b564
MC
7499 bnx2_set_power_state(bp, PCI_D0);
7500
b6016b76
MC
7501 if (data == 0)
7502 data = 2;
7503
7504 save = REG_RD(bp, BNX2_MISC_CFG);
7505 REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7506
7507 for (i = 0; i < (data * 2); i++) {
7508 if ((i % 2) == 0) {
7509 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7510 }
7511 else {
7512 REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7513 BNX2_EMAC_LED_1000MB_OVERRIDE |
7514 BNX2_EMAC_LED_100MB_OVERRIDE |
7515 BNX2_EMAC_LED_10MB_OVERRIDE |
7516 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7517 BNX2_EMAC_LED_TRAFFIC);
7518 }
7519 msleep_interruptible(500);
7520 if (signal_pending(current))
7521 break;
7522 }
7523 REG_WR(bp, BNX2_EMAC_LED, 0);
7524 REG_WR(bp, BNX2_MISC_CFG, save);
9f52b564
MC
7525
7526 if (!netif_running(dev))
7527 bnx2_set_power_state(bp, PCI_D3hot);
7528
b6016b76
MC
7529 return 0;
7530}
7531
4666f87a
MC
7532static int
7533bnx2_set_tx_csum(struct net_device *dev, u32 data)
7534{
7535 struct bnx2 *bp = netdev_priv(dev);
7536
7537 if (CHIP_NUM(bp) == CHIP_NUM_5709)
6460d948 7538 return (ethtool_op_set_tx_ipv6_csum(dev, data));
4666f87a
MC
7539 else
7540 return (ethtool_op_set_tx_csum(dev, data));
7541}
7542
7282d491 7543static const struct ethtool_ops bnx2_ethtool_ops = {
b6016b76
MC
7544 .get_settings = bnx2_get_settings,
7545 .set_settings = bnx2_set_settings,
7546 .get_drvinfo = bnx2_get_drvinfo,
244ac4f4
MC
7547 .get_regs_len = bnx2_get_regs_len,
7548 .get_regs = bnx2_get_regs,
b6016b76
MC
7549 .get_wol = bnx2_get_wol,
7550 .set_wol = bnx2_set_wol,
7551 .nway_reset = bnx2_nway_reset,
7959ea25 7552 .get_link = bnx2_get_link,
b6016b76
MC
7553 .get_eeprom_len = bnx2_get_eeprom_len,
7554 .get_eeprom = bnx2_get_eeprom,
7555 .set_eeprom = bnx2_set_eeprom,
7556 .get_coalesce = bnx2_get_coalesce,
7557 .set_coalesce = bnx2_set_coalesce,
7558 .get_ringparam = bnx2_get_ringparam,
7559 .set_ringparam = bnx2_set_ringparam,
7560 .get_pauseparam = bnx2_get_pauseparam,
7561 .set_pauseparam = bnx2_set_pauseparam,
7562 .get_rx_csum = bnx2_get_rx_csum,
7563 .set_rx_csum = bnx2_set_rx_csum,
4666f87a 7564 .set_tx_csum = bnx2_set_tx_csum,
b6016b76 7565 .set_sg = ethtool_op_set_sg,
b11d6213 7566 .set_tso = bnx2_set_tso,
b6016b76
MC
7567 .self_test = bnx2_self_test,
7568 .get_strings = bnx2_get_strings,
7569 .phys_id = bnx2_phys_id,
b6016b76 7570 .get_ethtool_stats = bnx2_get_ethtool_stats,
b9f2c044 7571 .get_sset_count = bnx2_get_sset_count,
b6016b76
MC
7572};
7573
7574/* Called with rtnl_lock */
7575static int
7576bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7577{
14ab9b86 7578 struct mii_ioctl_data *data = if_mii(ifr);
972ec0d4 7579 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7580 int err;
7581
7582 switch(cmd) {
7583 case SIOCGMIIPHY:
7584 data->phy_id = bp->phy_addr;
7585
7586 /* fallthru */
7587 case SIOCGMIIREG: {
7588 u32 mii_regval;
7589
583c28e5 7590 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7591 return -EOPNOTSUPP;
7592
dad3e452
MC
7593 if (!netif_running(dev))
7594 return -EAGAIN;
7595
c770a65c 7596 spin_lock_bh(&bp->phy_lock);
b6016b76 7597 err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
c770a65c 7598 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7599
7600 data->val_out = mii_regval;
7601
7602 return err;
7603 }
7604
7605 case SIOCSMIIREG:
583c28e5 7606 if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
7b6b8347
MC
7607 return -EOPNOTSUPP;
7608
dad3e452
MC
7609 if (!netif_running(dev))
7610 return -EAGAIN;
7611
c770a65c 7612 spin_lock_bh(&bp->phy_lock);
b6016b76 7613 err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
c770a65c 7614 spin_unlock_bh(&bp->phy_lock);
b6016b76
MC
7615
7616 return err;
7617
7618 default:
7619 /* do nothing */
7620 break;
7621 }
7622 return -EOPNOTSUPP;
7623}
7624
7625/* Called with rtnl_lock */
7626static int
7627bnx2_change_mac_addr(struct net_device *dev, void *p)
7628{
7629 struct sockaddr *addr = p;
972ec0d4 7630 struct bnx2 *bp = netdev_priv(dev);
b6016b76 7631
73eef4cd
MC
7632 if (!is_valid_ether_addr(addr->sa_data))
7633 return -EINVAL;
7634
b6016b76
MC
7635 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7636 if (netif_running(dev))
5fcaed01 7637 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
b6016b76
MC
7638
7639 return 0;
7640}
7641
7642/* Called with rtnl_lock */
7643static int
7644bnx2_change_mtu(struct net_device *dev, int new_mtu)
7645{
972ec0d4 7646 struct bnx2 *bp = netdev_priv(dev);
b6016b76
MC
7647
7648 if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7649 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7650 return -EINVAL;
7651
7652 dev->mtu = new_mtu;
5d5d0015 7653 return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
b6016b76
MC
7654}
7655
257ddbda 7656#ifdef CONFIG_NET_POLL_CONTROLLER
b6016b76
MC
7657static void
7658poll_bnx2(struct net_device *dev)
7659{
972ec0d4 7660 struct bnx2 *bp = netdev_priv(dev);
b2af2c1d 7661 int i;
b6016b76 7662
b2af2c1d 7663 for (i = 0; i < bp->irq_nvecs; i++) {
1bf1e347
MC
7664 struct bnx2_irq *irq = &bp->irq_tbl[i];
7665
7666 disable_irq(irq->vector);
7667 irq->handler(irq->vector, &bp->bnx2_napi[i]);
7668 enable_irq(irq->vector);
b2af2c1d 7669 }
b6016b76
MC
7670}
7671#endif
7672
253c8b75
MC
7673static void __devinit
7674bnx2_get_5709_media(struct bnx2 *bp)
7675{
7676 u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
7677 u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
7678 u32 strap;
7679
7680 if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
7681 return;
7682 else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
583c28e5 7683 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7684 return;
7685 }
7686
7687 if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
7688 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
7689 else
7690 strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;
7691
7692 if (PCI_FUNC(bp->pdev->devfn) == 0) {
7693 switch (strap) {
7694 case 0x4:
7695 case 0x5:
7696 case 0x6:
583c28e5 7697 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7698 return;
7699 }
7700 } else {
7701 switch (strap) {
7702 case 0x1:
7703 case 0x2:
7704 case 0x4:
583c28e5 7705 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
253c8b75
MC
7706 return;
7707 }
7708 }
7709}
7710
883e5151
MC
7711static void __devinit
7712bnx2_get_pci_speed(struct bnx2 *bp)
7713{
7714 u32 reg;
7715
7716 reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7717 if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7718 u32 clkreg;
7719
f86e82fb 7720 bp->flags |= BNX2_FLAG_PCIX;
883e5151
MC
7721
7722 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7723
7724 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7725 switch (clkreg) {
7726 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7727 bp->bus_speed_mhz = 133;
7728 break;
7729
7730 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7731 bp->bus_speed_mhz = 100;
7732 break;
7733
7734 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7735 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7736 bp->bus_speed_mhz = 66;
7737 break;
7738
7739 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7740 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7741 bp->bus_speed_mhz = 50;
7742 break;
7743
7744 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7745 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7746 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7747 bp->bus_speed_mhz = 33;
7748 break;
7749 }
7750 }
7751 else {
7752 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7753 bp->bus_speed_mhz = 66;
7754 else
7755 bp->bus_speed_mhz = 33;
7756 }
7757
7758 if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
f86e82fb 7759 bp->flags |= BNX2_FLAG_PCI_32BIT;
883e5151
MC
7760
7761}
7762
76d99061
MC
7763static void __devinit
7764bnx2_read_vpd_fw_ver(struct bnx2 *bp)
7765{
df25bc38 7766 int rc, i, j;
76d99061 7767 u8 *data;
df25bc38 7768 unsigned int block_end, rosize, len;
76d99061 7769
012093f6
MC
7770#define BNX2_VPD_NVRAM_OFFSET 0x300
7771#define BNX2_VPD_LEN 128
76d99061
MC
7772#define BNX2_MAX_VER_SLEN 30
7773
7774 data = kmalloc(256, GFP_KERNEL);
7775 if (!data)
7776 return;
7777
012093f6
MC
7778 rc = bnx2_nvram_read(bp, BNX2_VPD_NVRAM_OFFSET, data + BNX2_VPD_LEN,
7779 BNX2_VPD_LEN);
76d99061
MC
7780 if (rc)
7781 goto vpd_done;
7782
012093f6
MC
7783 for (i = 0; i < BNX2_VPD_LEN; i += 4) {
7784 data[i] = data[i + BNX2_VPD_LEN + 3];
7785 data[i + 1] = data[i + BNX2_VPD_LEN + 2];
7786 data[i + 2] = data[i + BNX2_VPD_LEN + 1];
7787 data[i + 3] = data[i + BNX2_VPD_LEN];
76d99061
MC
7788 }
7789
df25bc38
MC
7790 i = pci_vpd_find_tag(data, 0, BNX2_VPD_LEN, PCI_VPD_LRDT_RO_DATA);
7791 if (i < 0)
7792 goto vpd_done;
76d99061 7793
df25bc38
MC
7794 rosize = pci_vpd_lrdt_size(&data[i]);
7795 i += PCI_VPD_LRDT_TAG_SIZE;
7796 block_end = i + rosize;
76d99061 7797
df25bc38
MC
7798 if (block_end > BNX2_VPD_LEN)
7799 goto vpd_done;
76d99061 7800
df25bc38
MC
7801 j = pci_vpd_find_info_keyword(data, i, rosize,
7802 PCI_VPD_RO_KEYWORD_MFR_ID);
7803 if (j < 0)
7804 goto vpd_done;
76d99061 7805
df25bc38 7806 len = pci_vpd_info_field_size(&data[j]);
76d99061 7807
df25bc38
MC
7808 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7809 if (j + len > block_end || len != 4 ||
7810 memcmp(&data[j], "1028", 4))
7811 goto vpd_done;
4067a854 7812
df25bc38
MC
7813 j = pci_vpd_find_info_keyword(data, i, rosize,
7814 PCI_VPD_RO_KEYWORD_VENDOR0);
7815 if (j < 0)
7816 goto vpd_done;
4067a854 7817
df25bc38 7818 len = pci_vpd_info_field_size(&data[j]);
4067a854 7819
df25bc38
MC
7820 j += PCI_VPD_INFO_FLD_HDR_SIZE;
7821 if (j + len > block_end || len > BNX2_MAX_VER_SLEN)
76d99061 7822 goto vpd_done;
df25bc38
MC
7823
7824 memcpy(bp->fw_version, &data[j], len);
7825 bp->fw_version[len] = ' ';
76d99061
MC
7826
7827vpd_done:
7828 kfree(data);
7829}
7830
b6016b76
MC
7831static int __devinit
7832bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7833{
7834 struct bnx2 *bp;
7835 unsigned long mem_len;
58fc2ea4 7836 int rc, i, j;
b6016b76 7837 u32 reg;
40453c83 7838 u64 dma_mask, persist_dma_mask;
b6016b76 7839
b6016b76 7840 SET_NETDEV_DEV(dev, &pdev->dev);
972ec0d4 7841 bp = netdev_priv(dev);
b6016b76
MC
7842
7843 bp->flags = 0;
7844 bp->phy_flags = 0;
7845
354fcd77
MC
7846 bp->temp_stats_blk =
7847 kzalloc(sizeof(struct statistics_block), GFP_KERNEL);
7848
7849 if (bp->temp_stats_blk == NULL) {
7850 rc = -ENOMEM;
7851 goto err_out;
7852 }
7853
b6016b76
MC
7854 /* enable device (incl. PCI PM wakeup), and bus-mastering */
7855 rc = pci_enable_device(pdev);
7856 if (rc) {
3a9c6a49 7857 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
b6016b76
MC
7858 goto err_out;
7859 }
7860
7861 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
9b91cf9d 7862 dev_err(&pdev->dev,
3a9c6a49 7863 "Cannot find PCI device base address, aborting\n");
b6016b76
MC
7864 rc = -ENODEV;
7865 goto err_out_disable;
7866 }
7867
7868 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7869 if (rc) {
3a9c6a49 7870 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
b6016b76
MC
7871 goto err_out_disable;
7872 }
7873
7874 pci_set_master(pdev);
6ff2da49 7875 pci_save_state(pdev);
b6016b76
MC
7876
7877 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7878 if (bp->pm_cap == 0) {
9b91cf9d 7879 dev_err(&pdev->dev,
3a9c6a49 7880 "Cannot find power management capability, aborting\n");
b6016b76
MC
7881 rc = -EIO;
7882 goto err_out_release;
7883 }
7884
b6016b76
MC
7885 bp->dev = dev;
7886 bp->pdev = pdev;
7887
7888 spin_lock_init(&bp->phy_lock);
1b8227c4 7889 spin_lock_init(&bp->indirect_lock);
c5a88950
MC
7890#ifdef BCM_CNIC
7891 mutex_init(&bp->cnic_lock);
7892#endif
c4028958 7893 INIT_WORK(&bp->reset_task, bnx2_reset_task);
b6016b76
MC
7894
7895 dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
4edd473f 7896 mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
b6016b76
MC
7897 dev->mem_end = dev->mem_start + mem_len;
7898 dev->irq = pdev->irq;
7899
7900 bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7901
7902 if (!bp->regview) {
3a9c6a49 7903 dev_err(&pdev->dev, "Cannot map register space, aborting\n");
b6016b76
MC
7904 rc = -ENOMEM;
7905 goto err_out_release;
7906 }
7907
7908 /* Configure byte swap and enable write to the reg_window registers.
7909 * Rely on CPU to do target byte swapping on big endian systems
7910 * The chip's target access swapping will not swap all accesses
7911 */
7912 pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7913 BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7914 BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7915
829ca9a3 7916 bnx2_set_power_state(bp, PCI_D0);
b6016b76
MC
7917
7918 bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7919
883e5151
MC
7920 if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7921 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7922 dev_err(&pdev->dev,
3a9c6a49 7923 "Cannot find PCIE capability, aborting\n");
883e5151
MC
7924 rc = -EIO;
7925 goto err_out_unmap;
7926 }
f86e82fb 7927 bp->flags |= BNX2_FLAG_PCIE;
2dd201d7 7928 if (CHIP_REV(bp) == CHIP_REV_Ax)
f86e82fb 7929 bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
883e5151 7930 } else {
59b47d8a
MC
7931 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7932 if (bp->pcix_cap == 0) {
7933 dev_err(&pdev->dev,
3a9c6a49 7934 "Cannot find PCIX capability, aborting\n");
59b47d8a
MC
7935 rc = -EIO;
7936 goto err_out_unmap;
7937 }
61d9e3fa 7938 bp->flags |= BNX2_FLAG_BROKEN_STATS;
59b47d8a
MC
7939 }
7940
b4b36042
MC
7941 if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7942 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
f86e82fb 7943 bp->flags |= BNX2_FLAG_MSIX_CAP;
b4b36042
MC
7944 }
7945
8e6a72c4
MC
7946 if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7947 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
f86e82fb 7948 bp->flags |= BNX2_FLAG_MSI_CAP;
8e6a72c4
MC
7949 }
7950
40453c83
MC
7951 /* 5708 cannot support DMA addresses > 40-bit. */
7952 if (CHIP_NUM(bp) == CHIP_NUM_5708)
50cf156a 7953 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
40453c83 7954 else
6a35528a 7955 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
40453c83
MC
7956
7957 /* Configure DMA attributes. */
7958 if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7959 dev->features |= NETIF_F_HIGHDMA;
7960 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7961 if (rc) {
7962 dev_err(&pdev->dev,
3a9c6a49 7963 "pci_set_consistent_dma_mask failed, aborting\n");
40453c83
MC
7964 goto err_out_unmap;
7965 }
284901a9 7966 } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
3a9c6a49 7967 dev_err(&pdev->dev, "System does not support DMA, aborting\n");
40453c83
MC
7968 goto err_out_unmap;
7969 }
7970
f86e82fb 7971 if (!(bp->flags & BNX2_FLAG_PCIE))
883e5151 7972 bnx2_get_pci_speed(bp);
b6016b76
MC
7973
7974 /* 5706A0 may falsely detect SERR and PERR. */
7975 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7976 reg = REG_RD(bp, PCI_COMMAND);
7977 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7978 REG_WR(bp, PCI_COMMAND, reg);
7979 }
7980 else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
f86e82fb 7981 !(bp->flags & BNX2_FLAG_PCIX)) {
b6016b76 7982
9b91cf9d 7983 dev_err(&pdev->dev,
3a9c6a49 7984 "5706 A1 can only be used in a PCIX bus, aborting\n");
b6016b76
MC
7985 goto err_out_unmap;
7986 }
7987
7988 bnx2_init_nvram(bp);
7989
2726d6e1 7990 reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
e3648b3d
MC
7991
7992 if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
24cb230b
MC
7993 BNX2_SHM_HDR_SIGNATURE_SIG) {
7994 u32 off = PCI_FUNC(pdev->devfn) << 2;
7995
2726d6e1 7996 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
24cb230b 7997 } else
e3648b3d
MC
7998 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7999
b6016b76
MC
8000 /* Get the permanent MAC address. First we need to make sure the
8001 * firmware is actually running.
8002 */
2726d6e1 8003 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
b6016b76
MC
8004
8005 if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
8006 BNX2_DEV_INFO_SIGNATURE_MAGIC) {
3a9c6a49 8007 dev_err(&pdev->dev, "Firmware not running, aborting\n");
b6016b76
MC
8008 rc = -ENODEV;
8009 goto err_out_unmap;
8010 }
8011
76d99061
MC
8012 bnx2_read_vpd_fw_ver(bp);
8013
8014 j = strlen(bp->fw_version);
2726d6e1 8015 reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
76d99061 8016 for (i = 0; i < 3 && j < 24; i++) {
58fc2ea4
MC
8017 u8 num, k, skip0;
8018
76d99061
MC
8019 if (i == 0) {
8020 bp->fw_version[j++] = 'b';
8021 bp->fw_version[j++] = 'c';
8022 bp->fw_version[j++] = ' ';
8023 }
58fc2ea4
MC
8024 num = (u8) (reg >> (24 - (i * 8)));
8025 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
8026 if (num >= k || !skip0 || k == 1) {
8027 bp->fw_version[j++] = (num / k) + '0';
8028 skip0 = 0;
8029 }
8030 }
8031 if (i != 2)
8032 bp->fw_version[j++] = '.';
8033 }
2726d6e1 8034 reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
846f5c62
MC
8035 if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
8036 bp->wol = 1;
8037
8038 if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
f86e82fb 8039 bp->flags |= BNX2_FLAG_ASF_ENABLE;
c2d3db8c
MC
8040
8041 for (i = 0; i < 30; i++) {
2726d6e1 8042 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
c2d3db8c
MC
8043 if (reg & BNX2_CONDITION_MFW_RUN_MASK)
8044 break;
8045 msleep(10);
8046 }
8047 }
2726d6e1 8048 reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
58fc2ea4
MC
8049 reg &= BNX2_CONDITION_MFW_RUN_MASK;
8050 if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
8051 reg != BNX2_CONDITION_MFW_RUN_NONE) {
2726d6e1 8052 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
58fc2ea4 8053
76d99061
MC
8054 if (j < 32)
8055 bp->fw_version[j++] = ' ';
8056 for (i = 0; i < 3 && j < 28; i++) {
2726d6e1 8057 reg = bnx2_reg_rd_ind(bp, addr + i * 4);
58fc2ea4
MC
8058 reg = swab32(reg);
8059 memcpy(&bp->fw_version[j], &reg, 4);
8060 j += 4;
8061 }
8062 }
b6016b76 8063
2726d6e1 8064 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
b6016b76
MC
8065 bp->mac_addr[0] = (u8) (reg >> 8);
8066 bp->mac_addr[1] = (u8) reg;
8067
2726d6e1 8068 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
b6016b76
MC
8069 bp->mac_addr[2] = (u8) (reg >> 24);
8070 bp->mac_addr[3] = (u8) (reg >> 16);
8071 bp->mac_addr[4] = (u8) (reg >> 8);
8072 bp->mac_addr[5] = (u8) reg;
8073
8074 bp->tx_ring_size = MAX_TX_DESC_CNT;
932f3772 8075 bnx2_set_rx_ring_size(bp, 255);
b6016b76
MC
8076
8077 bp->rx_csum = 1;
8078
cf7474a6 8079 bp->tx_quick_cons_trip_int = 2;
b6016b76 8080 bp->tx_quick_cons_trip = 20;
cf7474a6 8081 bp->tx_ticks_int = 18;
b6016b76 8082 bp->tx_ticks = 80;
6aa20a22 8083
cf7474a6
MC
8084 bp->rx_quick_cons_trip_int = 2;
8085 bp->rx_quick_cons_trip = 12;
b6016b76
MC
8086 bp->rx_ticks_int = 18;
8087 bp->rx_ticks = 18;
8088
7ea6920e 8089 bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
b6016b76 8090
ac392abc 8091 bp->current_interval = BNX2_TIMER_INTERVAL;
b6016b76 8092
5b0c76ad
MC
8093 bp->phy_addr = 1;
8094
b6016b76 8095 /* Disable WOL support if we are running on a SERDES chip. */
253c8b75
MC
8096 if (CHIP_NUM(bp) == CHIP_NUM_5709)
8097 bnx2_get_5709_media(bp);
8098 else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
583c28e5 8099 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
bac0dff6 8100
0d8a6571 8101 bp->phy_port = PORT_TP;
583c28e5 8102 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
0d8a6571 8103 bp->phy_port = PORT_FIBRE;
2726d6e1 8104 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
846f5c62 8105 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
f86e82fb 8106 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8107 bp->wol = 0;
8108 }
38ea3686
MC
8109 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
8110 /* Don't do parallel detect on this board because of
8111 * some board problems. The link will not go down
8112 * if we do parallel detect.
8113 */
8114 if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
8115 pdev->subsystem_device == 0x310c)
8116 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
8117 } else {
5b0c76ad 8118 bp->phy_addr = 2;
5b0c76ad 8119 if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
583c28e5 8120 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
5b0c76ad 8121 }
261dd5ca
MC
8122 } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
8123 CHIP_NUM(bp) == CHIP_NUM_5708)
583c28e5 8124 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
fb0c18bd
MC
8125 else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
8126 (CHIP_REV(bp) == CHIP_REV_Ax ||
8127 CHIP_REV(bp) == CHIP_REV_Bx))
583c28e5 8128 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
b6016b76 8129
7c62e83b
MC
8130 bnx2_init_fw_cap(bp);
8131
16088272
MC
8132 if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
8133 (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
5ec6d7bf
MC
8134 (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
8135 !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
f86e82fb 8136 bp->flags |= BNX2_FLAG_NO_WOL;
846f5c62
MC
8137 bp->wol = 0;
8138 }
dda1e390 8139
b6016b76
MC
8140 if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
8141 bp->tx_quick_cons_trip_int =
8142 bp->tx_quick_cons_trip;
8143 bp->tx_ticks_int = bp->tx_ticks;
8144 bp->rx_quick_cons_trip_int =
8145 bp->rx_quick_cons_trip;
8146 bp->rx_ticks_int = bp->rx_ticks;
8147 bp->comp_prod_trip_int = bp->comp_prod_trip;
8148 bp->com_ticks_int = bp->com_ticks;
8149 bp->cmd_ticks_int = bp->cmd_ticks;
8150 }
8151
f9317a40
MC
8152 /* Disable MSI on 5706 if AMD 8132 bridge is found.
8153 *
8154 * MSI is defined to be 32-bit write. The 5706 does 64-bit MSI writes
8155 * with byte enables disabled on the unused 32-bit word. This is legal
8156 * but causes problems on the AMD 8132 which will eventually stop
8157 * responding after a while.
8158 *
8159 * AMD believes this incompatibility is unique to the 5706, and
88187dfa 8160 * prefers to locally disable MSI rather than globally disabling it.
f9317a40
MC
8161 */
8162 if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
8163 struct pci_dev *amd_8132 = NULL;
8164
8165 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
8166 PCI_DEVICE_ID_AMD_8132_BRIDGE,
8167 amd_8132))) {
f9317a40 8168
44c10138
AK
8169 if (amd_8132->revision >= 0x10 &&
8170 amd_8132->revision <= 0x13) {
f9317a40
MC
8171 disable_msi = 1;
8172 pci_dev_put(amd_8132);
8173 break;
8174 }
8175 }
8176 }
8177
deaf391b 8178 bnx2_set_default_link(bp);
b6016b76
MC
8179 bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
8180
cd339a0e 8181 init_timer(&bp->timer);
ac392abc 8182 bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
cd339a0e
MC
8183 bp->timer.data = (unsigned long) bp;
8184 bp->timer.function = bnx2_timer;
8185
b6016b76
MC
8186 return 0;
8187
8188err_out_unmap:
8189 if (bp->regview) {
8190 iounmap(bp->regview);
73eef4cd 8191 bp->regview = NULL;
b6016b76
MC
8192 }
8193
8194err_out_release:
8195 pci_release_regions(pdev);
8196
8197err_out_disable:
8198 pci_disable_device(pdev);
8199 pci_set_drvdata(pdev, NULL);
8200
8201err_out:
8202 return rc;
8203}
8204
883e5151
MC
8205static char * __devinit
8206bnx2_bus_string(struct bnx2 *bp, char *str)
8207{
8208 char *s = str;
8209
f86e82fb 8210 if (bp->flags & BNX2_FLAG_PCIE) {
883e5151
MC
8211 s += sprintf(s, "PCI Express");
8212 } else {
8213 s += sprintf(s, "PCI");
f86e82fb 8214 if (bp->flags & BNX2_FLAG_PCIX)
883e5151 8215 s += sprintf(s, "-X");
f86e82fb 8216 if (bp->flags & BNX2_FLAG_PCI_32BIT)
883e5151
MC
8217 s += sprintf(s, " 32-bit");
8218 else
8219 s += sprintf(s, " 64-bit");
8220 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8221 }
8222 return str;
8223}
8224
2ba582b7 8225static void __devinit
35efa7c1
MC
8226bnx2_init_napi(struct bnx2 *bp)
8227{
b4b36042 8228 int i;
35efa7c1 8229
4327ba43 8230 for (i = 0; i < bp->irq_nvecs; i++) {
35e9010b
MC
8231 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8232 int (*poll)(struct napi_struct *, int);
8233
8234 if (i == 0)
8235 poll = bnx2_poll;
8236 else
f0ea2e63 8237 poll = bnx2_poll_msix;
35e9010b
MC
8238
8239 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
b4b36042
MC
8240 bnapi->bp = bp;
8241 }
35efa7c1
MC
8242}
8243
0421eae6
SH
/* Net device callbacks handed to the networking core via dev->netdev_ops. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#ifdef CONFIG_NET_POLL_CONTROLLER
	/* netconsole/netpoll support */
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8262
72dccb01
ED
/* Mirror the given feature @flags into dev->vlan_features so the same
 * offloads remain usable on VLAN devices stacked on top of this one.
 * Compiles to a no-op when VLAN support is not configured.
 *
 * Fix: qualifier order was "static void inline"; kernel convention is
 * "static inline void" (inline before the return type).
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8269
b6016b76
MC
/* Probe one bnx2 device: allocate the netdev, initialize the board,
 * request firmware, configure offload feature flags, and register the
 * interface with the networking core.
 *
 * Returns 0 on success or a negative errno; on failure everything set
 * up here and in bnx2_init_board() is unwound.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver version banner only on the first probe. */
	if (version_printed++ == 0)
		pr_info("%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* Publish the adapter MAC address; bp->mac_addr is populated
	 * during board init (not shown here). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_GRO;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	/* IPv6 checksum offload is enabled only on 5709 chips. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	/* TSO over IPv6 likewise only on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	netdev_info(dev, "%s (%c%d) %s found at mem %lx, IRQ %d, node addr %pM\n",
		    board_info[ent->driver_data].name,
		    ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		    ((CHIP_ID(bp) & 0x0ff0) >> 4),
		    bnx2_bus_string(bp, str),
		    dev->base_addr,
		    bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Unwind firmware, mappings and PCI state acquired above and in
	 * bnx2_init_board(). */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8353
/* Undo bnx2_init_one(): unregister the interface and release firmware,
 * MMIO mapping, per-device allocations, and PCI resources.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure no deferred driver work is still running before
	 * the device goes away. */
	flush_scheduled_work();

	unregister_netdev(dev);

	/* Firmware images were requested in bnx2_init_one(). */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	/* Allocated separately from the netdev private area, so free it
	 * before free_netdev() releases bp itself. */
	kfree(bp->temp_stats_blk);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8379
/* PM suspend hook: save PCI config state, quiesce the NIC if it is up,
 * and drop the chip to the requested low-power state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before touching the chip. */
	flush_scheduled_work();
	bnx2_netif_stop(bp, true);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	/* Rings are re-populated on resume via bnx2_init_nic(). */
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8403
/* PM resume hook: restore PCI config state and, if the interface was up
 * at suspend time, return the chip to full power and restart it.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	/* Full re-init (reset + ring setup), mirroring what suspend
	 * tore down. */
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp, true);
	return 0;
}
8420
6ff2da49
WX
8421/**
8422 * bnx2_io_error_detected - called when PCI error is detected
8423 * @pdev: Pointer to PCI device
8424 * @state: The current pci connection state
8425 *
8426 * This function is called after a PCI bus error affecting
8427 * this device has been detected.
8428 */
8429static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8430 pci_channel_state_t state)
8431{
8432 struct net_device *dev = pci_get_drvdata(pdev);
8433 struct bnx2 *bp = netdev_priv(dev);
8434
8435 rtnl_lock();
8436 netif_device_detach(dev);
8437
2ec3de26
DN
8438 if (state == pci_channel_io_perm_failure) {
8439 rtnl_unlock();
8440 return PCI_ERS_RESULT_DISCONNECT;
8441 }
8442
6ff2da49 8443 if (netif_running(dev)) {
212f9934 8444 bnx2_netif_stop(bp, true);
6ff2da49
WX
8445 del_timer_sync(&bp->timer);
8446 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8447 }
8448
8449 pci_disable_device(pdev);
8450 rtnl_unlock();
8451
8452 /* Request a slot slot reset. */
8453 return PCI_ERS_RESULT_NEED_RESET;
8454}
8455
8456/**
8457 * bnx2_io_slot_reset - called after the pci bus has been reset.
8458 * @pdev: Pointer to PCI device
8459 *
8460 * Restart the card from scratch, as if from a cold-boot.
8461 */
8462static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8463{
8464 struct net_device *dev = pci_get_drvdata(pdev);
8465 struct bnx2 *bp = netdev_priv(dev);
8466
8467 rtnl_lock();
8468 if (pci_enable_device(pdev)) {
8469 dev_err(&pdev->dev,
3a9c6a49 8470 "Cannot re-enable PCI device after reset\n");
6ff2da49
WX
8471 rtnl_unlock();
8472 return PCI_ERS_RESULT_DISCONNECT;
8473 }
8474 pci_set_master(pdev);
8475 pci_restore_state(pdev);
529fab67 8476 pci_save_state(pdev);
6ff2da49
WX
8477
8478 if (netif_running(dev)) {
8479 bnx2_set_power_state(bp, PCI_D0);
8480 bnx2_init_nic(bp, 1);
8481 }
8482
8483 rtnl_unlock();
8484 return PCI_ERS_RESULT_RECOVERED;
8485}
8486
8487/**
8488 * bnx2_io_resume - called when traffic can start flowing again.
8489 * @pdev: Pointer to PCI device
8490 *
8491 * This callback is called when the error recovery driver tells us that
8492 * its OK to resume normal operation.
8493 */
8494static void bnx2_io_resume(struct pci_dev *pdev)
8495{
8496 struct net_device *dev = pci_get_drvdata(pdev);
8497 struct bnx2 *bp = netdev_priv(dev);
8498
8499 rtnl_lock();
8500 if (netif_running(dev))
212f9934 8501 bnx2_netif_start(bp, true);
6ff2da49
WX
8502
8503 netif_device_attach(dev);
8504 rtnl_unlock();
8505}
8506
/* PCI error recovery callbacks, hooked up via bnx2_pci_driver. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8512
b6016b76 8513static struct pci_driver bnx2_pci_driver = {
14ab9b86
PH
8514 .name = DRV_MODULE_NAME,
8515 .id_table = bnx2_pci_tbl,
8516 .probe = bnx2_init_one,
8517 .remove = __devexit_p(bnx2_remove_one),
8518 .suspend = bnx2_suspend,
8519 .resume = bnx2_resume,
6ff2da49 8520 .err_handler = &bnx2_err_handler,
b6016b76
MC
8521};
8522
8523static int __init bnx2_init(void)
8524{
29917620 8525 return pci_register_driver(&bnx2_pci_driver);
b6016b76
MC
8526}
8527
/* Module unload: unregister the PCI driver (triggers remove on all
 * bound devices). */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8532
/* Wire the module entry/exit points to the registration helpers above. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);