/* bnx2x.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2008 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

/* define this to make the driver freeze on error
 * to allow getting debug info
 * (you will need to reboot afterwards)
 */
/*#define BNX2X_STOP_ON_ERROR*/

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#ifdef NETIF_F_HW_VLAN_TX
	#include <linux/if_vlan.h>
	#define BCM_VLAN 1
#endif
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/version.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.42.4"
#define DRV_MODULE_RELDATE	"2008/4/9"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771X 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710 Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int use_inta;
static int poll;
static int onefunc;
static int nomcp;
static int debug;
static int use_multi;

module_param(use_inta, int, 0);
module_param(poll, int, 0);
module_param(onefunc, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(use_inta, "use INT#A instead of MSI-X");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(onefunc, "enable only first function");
MODULE_PARM_DESC(nomcp, "ignore management CPU (Implies onefunc)");
MODULE_PARM_DESC(debug, "default debug msglevel");

#ifdef BNX2X_MULTI
module_param(use_multi, int, 0);
MODULE_PARM_DESC(use_multi, "use per-CPU queues");
#endif

enum bnx2x_board_type {
	BCM57710 = 0,
};

/* indexed by board_t, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
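/* Indirect access below goes through the GRC window in PCI config
 * space: the target register address is written to PCICFG_GRC_ADDRESS,
 * the data is then accessible at PCICFG_GRC_DATA, and the window is
 * parked back at the vendor ID offset afterwards so a stray config
 * cycle cannot touch a live register.
 */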
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

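/* Both DMAE directions below share one completion protocol: the engine
 * writes DMAE_COMP_VAL to the wb_comp slowpath word when the transfer
 * is done, and the driver polls that word (with a longer delay on
 * emulation/FPGA chip revisions).
 */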
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT);

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(bp->port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, (bp->port)*MAX_DMAE_C_PER_PORT);

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	int i, j, rc = 0;
	char last_idx;
	const char storm[] = {"XTCU"};
	const u32 intmem_base[] = {
		BAR_XSTRORM_INTMEM,
		BAR_TSTRORM_INTMEM,
		BAR_CSTRORM_INTMEM,
		BAR_USTRORM_INTMEM
	};

	/* Go through all instances of all SEMIs */
	for (i = 0; i < 4; i++) {
		last_idx = REG_RD8(bp, XSTORM_ASSERT_LIST_INDEX_OFFSET +
				   intmem_base[i]);
		if (last_idx)
			BNX2X_LOG("DATA %cSTORM_ASSERT_LIST_INDEX 0x%x\n",
				  storm[i], last_idx);

		/* print the asserts */
		for (j = 0; j < STROM_ASSERT_ARRAY_SIZE; j++) {
			u32 row0, row1, row2, row3;

			row0 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) +
				      intmem_base[i]);
			row1 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 4 +
				      intmem_base[i]);
			row2 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 8 +
				      intmem_base[i]);
			row3 = REG_RD(bp, XSTORM_ASSERT_LIST_OFFSET(j) + 12 +
				      intmem_base[i]);

			if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
				BNX2X_LOG("DATA %cSTORM_ASSERT_INDEX 0x%x ="
					  " 0x%08x 0x%08x 0x%08x 0x%08x\n",
					  storm[i], j, row3, row2, row1, row0);
				rc++;
			} else {
				break;
			}
		}
	}
	return rc;
}

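/* Dump the MCP scratchpad: 'mark' points into a circular text buffer,
 * so the first loop below prints from the mark to the end of the
 * buffer and the second wraps around to print the rest.
 */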
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)"
			  " *rx_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) fp_c_idx(%x) fp_u_idx(%x)"
			  " bd data(%x,%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, *fp->tx_cons_sb, *fp->rx_cons_sb,
			  fp->rx_comp_prod, fp->rx_comp_cons, fp->fp_c_idx,
			  fp->fp_u_idx, hw_prods->packets_prod,
			  hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[0], rx_bd[1], sw_bd->skb);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");

	bp->stats_state = STATS_STATE_DISABLE;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
}

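/* The HC_CONFIG register selects how the HC delivers interrupts.  Note
 * the double write in the INTx path below: per the errata A0.158
 * workaround the value is first written with the MSI/MSI-X enable bit
 * still set and then rewritten with it cleared.
 */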
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~HC_CONFIG_0_REG_SINGLE_ISR_EN_0;
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		/* Errata A0.158 workaround */
		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
		   val, port, addr, msix);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) MSI-X %d\n",
	   val, port, addr, msix);

	REG_WR(bp, addr, val);
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = bp->port;
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i;

	atomic_inc(&bp->intr_sem);
	/* prevent the HW from sending interrupts */
	bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i].vector);

		/* one more for the Slow Path IRQ */
		synchronize_irq(bp->msix_table[i].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_work_sync(&bp->sp_task);
}

/* fast path code */

/*
 * general service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 igu_addr = (IGU_ADDR_INT_ACK + IGU_PORT_BASE * bp->port) * 8;
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   (*(u32 *)&igu_ack), BAR_IGU_INTMEM + igu_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, (*(u32 *)&igu_ack));
}

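/* Fold the per-queue status block indices into a bitmask: bit 0 is set
 * when the CSTORM index moved, bit 1 when the USTORM index moved.
 */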
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static inline int bnx2x_has_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);

	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;

	if ((rx_cons_sb != fp->rx_comp_cons) ||
	    (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons))
		return 1;

	return 0;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 igu_addr = (IGU_ADDR_SIMD_MASK + IGU_PORT_BASE * bp->port) * 8;
	u32 result = REG_RD(bp, BAR_IGU_INTMEM + igu_addr);

/*	DP(NETIF_MSG_INTR, "read 0x%08x from IGU addr 0x%x\n",
	   result, BAR_IGU_INTMEM + igu_addr); */

#ifdef IGU_DEBUG
#warning IGU_DEBUG active
	if (result == 0) {
		BNX2X_ERR("read %x from IGU\n", result);
		REG_WR(bp, TM_REG_TIMER_SOFT_RST, 0);
	}
#endif
	return result;
}


/*
 * fast path service functions
 */

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = tx_buf->first_bd;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("bad nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	BUG_TRAP(skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return bd_idx;
}

715{
716 u16 used;
717 u32 prod;
718 u32 cons;
719
720 /* Tell compiler that prod and cons can change */
721 barrier();
722 prod = fp->tx_bd_prod;
723 cons = fp->tx_bd_cons;
724
725 used = (NUM_TX_BD - NUM_TX_RINGS + prod - cons +
726 (cons / TX_DESC_CNT) - (prod / TX_DESC_CNT));
727
728 if (prod >= cons) {
729 /* used = prod - cons - prod/size + cons/size */
730 used -= NUM_TX_BD - NUM_TX_RINGS;
731 }
732
733 BUG_TRAP(used <= fp->bp->tx_ring_size);
734 BUG_TRAP((fp->bp->tx_ring_size - used) <= MAX_TX_AVAIL);
735
736 return (fp->bp->tx_ring_size - used);
737}
738
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %d\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_cons update visible to start_xmit()
	 * before checking for netif_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_queue_stopped(bp->dev))) {

		netif_tx_lock(bp->dev);

		if (netif_queue_stopped(bp->dev) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_wake_queue(bp->dev);

		netif_tx_unlock(bp->dev);
	}
}

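/* Slowpath completions arrive on the fastpath RCQ, so this handler
 * advances the per-queue or global state machine according to which
 * ramrod completed and in which state it was expected.
 */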
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(NETIF_MSG_RX_STATUS,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state, rr_cqe->ramrod_cqe.type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply(%d) state is %x\n",
				  command, fp->state);
		}
		mb(); /* force bnx2x_wait_ramrod to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n",
		   cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFUP, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected ramrod (%d) state is %x\n",
			  command, bp->state);
	}

	mb(); /* force bnx2x_wait_ramrod to see the change */
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(mapping))) {

		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

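/* RX completion loop: walk the RCQ between the software and hardware
 * consumer indices, hand slowpath CQEs to bnx2x_sp_event() and refill
 * or recycle buffers for fastpath packets.
 */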
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		unsigned int len, pad;
		struct sw_rx_bd *rx_buf;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];

		DP(NETIF_MSG_RX_STATUS, "hw_comp_cons %u sw_comp_cons %u"
		   " comp_ring (%u) bd_ring (%u,%u)\n",
		   hw_comp_cons, sw_comp_cons,
		   comp_ring_cons, bd_prod, bd_cons);
		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %x\n",
		   cqe->fast_path_cqe.type,
		   cqe->fast_path_cqe.error_type_flags,
		   cqe->fast_path_cqe.status_flags,
		   cqe->fast_path_cqe.rss_hash_result,
		   cqe->fast_path_cqe.vlan_tag, cqe->fast_path_cqe.pkt_len);

		/* is this a slowpath msg? */
		if (unlikely(cqe->fast_path_cqe.type)) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;

			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
					pad + RX_COPY_THRESH,
					PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe->fast_path_cqe.error_type_flags &
							ETH_RX_ERROR_FALGS)) {
			/* do we sometimes forward error packets anyway? */
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags(%u) Rx packet(%u)\n",
				   cqe->fast_path_cqe.error_type_flags,
				   sw_comp_cons);
				/* TBD make sure MC counts this as a drop */
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					/* TBD count this as a drop? */
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
					bp->rx_buf_use_size,
					PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum && BNX2X_RX_SUM_OK(cqe))
				skb->ip_summed = CHECKSUM_UNNECESSARY;

			/* TBD do we pass bad csum packets in promisc */
		}

#ifdef BCM_VLAN
		if ((le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags)
		     & PARSING_FLAGS_NUMBER_OF_NESTED_VLANS)
		    && (bp->vlgrp != NULL))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

		bp->dev->last_rx = jiffies;

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);
		rx_pkt++;

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_RCQ_PROD_OFFSET(bp->port, fp->index), sw_comp_prod);

	mmiowb(); /* keep prod updates ordered */

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	struct net_device *dev = bp->dev;
	int index = fp->index;

	DP(NETIF_MSG_INTR, "got an msix interrupt on [%d]\n", index);
	bnx2x_ack_sb(bp, index, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	netif_rx_schedule(dev, &bnx2x_fp(bp, index, napi));
	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);
	u16 status = bnx2x_ack_int(bp);

	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}

	DP(NETIF_MSG_INTR, "got an interrupt status is %u\n", status);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Return here if interrupt is shared and is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	if (status & 0x2) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		netif_rx_schedule(dev, &bnx2x_fp(bp, 0, napi));

		status &= ~0x2;
		if (!status)
			return IRQ_HANDLED;
	}

	if (unlikely(status & 0x1)) {

		schedule_work(&bp->sp_task);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	DP(NETIF_MSG_INTR, "got an unknown interrupt! (status is %u)\n",
	   status);

	return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

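/* The MISC driver-control registers implement a per-port hardware lock
 * shared with the MCP; bit N of the lock register guards resource N,
 * and ownership is claimed by the set/read-back handshake below.
 */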
static int bnx2x_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = bp->port;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 1 second every 5ms */
	for (cnt = 0; cnt < 200; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8 + 4,
		       resource_bit);
		lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_hw_unlock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	u8 port = bp->port;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, MISC_REG_DRIVER_CONTROL_1 + port*8);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, MISC_REG_DRIVER_CONTROL_1 + port*8, resource_bit);
	return 0;
}

/* HW Lock for shared dual port PHYs */
static void bnx2x_phy_hw_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_phy_hw_unlock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ bp->port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_hw_unlock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->advertising &= ~(ADVERTISED_Asym_Pause |
				     ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->advertising |= (ADVERTISED_Asym_Pause |
				    ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->advertising &= ~(ADVERTISED_Asym_Pause |
				     ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	u8 rc;

	/* Initialize link parameters structure variables */
	bp->link_params.mtu = bp->dev->mtu;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	if (bp->link_vars.link_up)
		bnx2x_link_report(bp);

	bnx2x_calc_fc_adv(bp);
	return rc;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_phy_init(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	bnx2x_calc_fc_adv(bp);
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_link_reset(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_phy_hw_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	return rc;
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	bnx2x_phy_hw_lock(bp);
	bnx2x_link_update(&bp->link_params, &bp->link_vars);
	bnx2x_phy_hw_unlock(bp);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	/* indicate link status */
	bnx2x_link_report(bp);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int port = bp->port;

	DP(NETIF_MSG_TIMER,
	   "spe (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(port),
	       bp->spq_prod_idx);

	spin_unlock(&bp->spq_lock);
	return 0;
}

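/* The ALR (access lock register) at GRCBASE_MCP + 0x9c arbitrates MCP
 * access between the driver and the firmware: writing bit 31 requests
 * the lock, and reading it back set means the lock was granted.
 */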
/* acquire split MCP access lock register */
static int bnx2x_lock_alr(struct bnx2x *bp)
{
	int rc = 0;
	u32 i, j, val;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}

	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire nvram interface\n");

		rc = -EBUSY;
	}

	return rc;
}

/* Release split MCP access lock register */
static void bnx2x_unlock_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */

	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = bp->port;
	u32 igu_addr = (IGU_ADDR_ATTN_BITS_SET + IGU_PORT_BASE * port) * 8;
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;

	if (~bp->aeu_mask & (asserted & 0xff))
		BNX2X_ERR("IGU ERROR\n");
	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   bp->aeu_mask, asserted);
	bp->aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "after masking: aeu_mask %x\n", bp->aeu_mask);

	REG_WR(bp, aeu_addr, bp->aeu_mask);

	bp->attn_state |= asserted;

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at IGU addr 0x%x\n",
	   asserted, BAR_IGU_INTMEM + igu_addr);
	REG_WR(bp, BAR_IGU_INTMEM + igu_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC)
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = bp->port;
	int reg_offset;
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
					external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_mc_assert(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {

		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
		BNX2X_ERR("LATCHED attention 0x%x (masked)\n", attn);
	}
}

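/* Deassertion fans out through the AEU attention groups: for every
 * group whose bit dropped, the four after-invert signal words are
 * masked with that group's routing and dispatched to the per-block
 * handlers above.
 */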
static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = bp->port;
	int index;
	u32 reg_addr;
	u32 val;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_lock_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn %llx\n", (unsigned long long)attn.sig[0]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %llx\n", index,
			   (unsigned long long)group_mask.sig[0]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_INTERRUT_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_INTERRUT_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_INTERRUT_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block attention"
					  " set0 0x%x set1 0x%x"
					  " set2 0x%x\n",
					  (attn.sig[0] & group_mask.sig[0] &
						HW_INTERRUT_ASSERT_SET_0),
					  (attn.sig[1] & group_mask.sig[1] &
						HW_INTERRUT_ASSERT_SET_1),
					  (attn.sig[2] & group_mask.sig[2] &
						HW_INTERRUT_ASSERT_SET_2));

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_unlock_alr(bp);

	reg_addr = (IGU_ADDR_ATTN_BITS_CLR + IGU_PORT_BASE * port) * 8;

	val = ~deasserted;
/*	DP(NETIF_MSG_INTR, "write 0x%08x to IGU addr 0x%x\n",
	   val, BAR_IGU_INTMEM + reg_addr); */
	REG_WR(bp, BAR_IGU_INTMEM + reg_addr, val);

	if (bp->aeu_mask & (deasserted & 0xff))
		BNX2X_ERR("IGU BUG\n");
	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU BUG\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	DP(NETIF_MSG_HW, "aeu_mask %x\n", bp->aeu_mask);
	bp->aeu_mask |= (deasserted & 0xff);

	DP(NETIF_MSG_HW, "new mask %x\n", bp->aeu_mask);
	REG_WR(bp, reg_addr, bp->aeu_mask);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

1923static void bnx2x_attn_int(struct bnx2x *bp)
1924{
1925 /* read local copy of bits */
1926 u32 attn_bits = bp->def_status_blk->atten_status_block.attn_bits;
1927 u32 attn_ack = bp->def_status_blk->atten_status_block.attn_bits_ack;
1928 u32 attn_state = bp->attn_state;
1929
1930 /* look for changed bits */
1931 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
1932 u32 deasserted = ~attn_bits & attn_ack & attn_state;
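	/* editorial example (illustrative values only): with
	 * attn_bits = 0110b, attn_ack = 0011b, attn_state = 0011b:
	 * asserted   =  0110b & ~0011b & ~0011b = 0100b (bit 2 newly raised)
	 * deasserted = ~0110b &  0011b &  0011b = 0001b (bit 0 newly cleared)
	 */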
1933
1934 DP(NETIF_MSG_HW,
1935 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
1936 attn_bits, attn_ack, asserted, deasserted);
1937
1938 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
1939 BNX2X_ERR("bad attention state\n");
1940
1941 /* handle bits that were raised */
1942 if (asserted)
1943 bnx2x_attn_int_asserted(bp, asserted);
1944
1945 if (deasserted)
1946 bnx2x_attn_int_deasserted(bp, deasserted);
1947}
1948
1949static void bnx2x_sp_task(struct work_struct *work)
1950{
1951 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task);
1952 u16 status;
1953
1954 /* Return here if interrupt is disabled */
1955 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 1956 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
1957 return;
1958 }
1959
1960 status = bnx2x_update_dsb_idx(bp);
1961 if (status == 0)
1962 BNX2X_ERR("spurious slowpath interrupt!\n");
1963
1964 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
1965
877e9aa4
ET
1966 /* HW attentions */
1967 if (status & 0x1)
a2fbb9ea 1968 bnx2x_attn_int(bp);
a2fbb9ea 1969
877e9aa4 1970 /* CStorm events: query_stats, port delete ramrod */
a2fbb9ea
ET
1971 if (status & 0x2)
1972 bp->stat_pending = 0;
1973
1974 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, bp->def_att_idx,
1975 IGU_INT_NOP, 1);
1976 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
1977 IGU_INT_NOP, 1);
1978 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
1979 IGU_INT_NOP, 1);
1980 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
1981 IGU_INT_NOP, 1);
1982 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
1983 IGU_INT_ENABLE, 1);
877e9aa4 1984
a2fbb9ea
ET
1985}
1986
1987static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
1988{
1989 struct net_device *dev = dev_instance;
1990 struct bnx2x *bp = netdev_priv(dev);
1991
1992 /* Return here if interrupt is disabled */
1993 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
877e9aa4 1994 DP(BNX2X_MSG_SP, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
1995 return IRQ_HANDLED;
1996 }
1997
877e9aa4 1998 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
1999
2000#ifdef BNX2X_STOP_ON_ERROR
2001 if (unlikely(bp->panic))
2002 return IRQ_HANDLED;
2003#endif
2004
2005 schedule_work(&bp->sp_task);
2006
2007 return IRQ_HANDLED;
2008}
2009
2010/* end of slow path */
2011
2012/* Statistics */
2013
2014/****************************************************************************
2015* Macros
2016****************************************************************************/
2017
2018#define UPDATE_STAT(s, t) \
2019 do { \
2020 estats->t += new->s - old->s; \
2021 old->s = new->s; \
2022 } while (0)
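/* e.g. if new->s reads 110 while old->s holds 100, UPDATE_STAT adds the
 * delta of 10 to estats->t and records 110 as the new baseline */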
2023
2024/* sum[hi:lo] += add[hi:lo] */
2025#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2026 do { \
2027 s_lo += a_lo; \
2028 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
2029 } while (0)
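/* carry example: adding a_lo = 0xffffffff to s_lo = 1 wraps s_lo to 0;
 * since s_lo < a_lo afterwards, one carry propagates into s_hi */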
2030
2031/* difference = minuend - subtrahend */
2032#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2033 do { \
2034 if (m_lo < s_lo) { /* underflow */ \
2035 d_hi = m_hi - s_hi; \
2036 if (d_hi > 0) { /* we can 'loan' 1 */ \
2037 d_hi--; \
2038 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
2039 } else { /* m_hi <= s_hi */ \
2040 d_hi = 0; \
2041 d_lo = 0; \
2042 } \
2043 } else { /* m_lo >= s_lo */ \
2044 if (m_hi < s_hi) { \
2045 d_hi = 0; \
2046 d_lo = 0; \
2047 } else { /* m_hi >= s_hi */ \
2048 d_hi = m_hi - s_hi; \
2049 d_lo = m_lo - s_lo; \
2050 } \
2051 } \
2052 } while (0)
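/* borrow example: m = 0x00000001_00000000 minus s = 0x00000000_00000001
 * takes the underflow branch and borrows from m_hi:
 * d_hi = 1 - 0 - 1 = 0, d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff */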
2053
2054/* minuend -= subtrahend */
2055#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
2056 do { \
2057 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
2058 } while (0)
2059
2060#define UPDATE_STAT64(s_hi, t_hi, s_lo, t_lo) \
2061 do { \
2062 DIFF_64(diff.hi, new->s_hi, old->s_hi, \
2063 diff.lo, new->s_lo, old->s_lo); \
2064 old->s_hi = new->s_hi; \
2065 old->s_lo = new->s_lo; \
2066 ADD_64(estats->t_hi, diff.hi, \
2067 estats->t_lo, diff.lo); \
2068 } while (0)
2069
2070/* sum[hi:lo] += add */
2071#define ADD_EXTEND_64(s_hi, s_lo, a) \
2072 do { \
2073 s_lo += a; \
2074 s_hi += (s_lo < a) ? 1 : 0; \
2075 } while (0)
2076
2077#define UPDATE_EXTEND_STAT(s, t_hi, t_lo) \
2078 do { \
2079 ADD_EXTEND_64(estats->t_hi, estats->t_lo, new->s); \
2080 } while (0)
2081
2082#define UPDATE_EXTEND_TSTAT(s, t_hi, t_lo) \
2083 do { \
2084 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2085 old_tclient->s = le32_to_cpu(tclient->s); \
2086 ADD_EXTEND_64(estats->t_hi, estats->t_lo, diff); \
2087 } while (0)
2088
2089/*
2090 * General service functions
2091 */
2092
2093static inline long bnx2x_hilo(u32 *hiref)
2094{
2095 u32 lo = *(hiref + 1);
2096#if (BITS_PER_LONG == 64)
2097 u32 hi = *hiref;
2098
2099 return HILO_U64(hi, lo);
2100#else
2101 return lo;
2102#endif
2103}
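/* note: on 32-bit builds only the low 32 bits are returned, since the
 * net_device_stats fields this feeds are unsigned long */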
2104
2105/*
2106 * Init service functions
2107 */
2108
2109static void bnx2x_init_mac_stats(struct bnx2x *bp)
2110{
2111 struct dmae_command *dmae;
2112 int port = bp->port;
2113 int loader_idx = port * 8;
2114 u32 opcode;
2115 u32 mac_addr;
2116
2117 bp->executer_idx = 0;
2118 if (bp->fw_mb) {
2119 /* MCP */
2120 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2121 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2122#ifdef __BIG_ENDIAN
2123 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2124#else
2125 DMAE_CMD_ENDIANITY_DW_SWAP |
2126#endif
2127 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2128
c18487ee 2129 if (bp->link_vars.link_up)
a2fbb9ea
ET
2130 opcode |= (DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE);
2131
2132 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2133 dmae->opcode = opcode;
2134 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, eth_stats) +
2135 sizeof(u32));
2136 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, eth_stats) +
2137 sizeof(u32));
2138 dmae->dst_addr_lo = bp->fw_mb >> 2;
2139 dmae->dst_addr_hi = 0;
2140 dmae->len = (offsetof(struct bnx2x_eth_stats, mac_stx_end) -
2141 sizeof(u32)) >> 2;
c18487ee 2142 if (bp->link_vars.link_up) {
a2fbb9ea
ET
2143 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2144 dmae->comp_addr_hi = 0;
2145 dmae->comp_val = 1;
2146 } else {
2147 dmae->comp_addr_lo = 0;
2148 dmae->comp_addr_hi = 0;
2149 dmae->comp_val = 0;
2150 }
2151 }
2152
c18487ee 2153 if (!bp->link_vars.link_up) {
a2fbb9ea
ET
2154 /* no need to collect statistics while the link is down */
2155 return;
2156 }
2157
2158 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2159 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2160 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2161#ifdef __BIG_ENDIAN
2162 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2163#else
2164 DMAE_CMD_ENDIANITY_DW_SWAP |
2165#endif
2166 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2167
c18487ee 2168 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
2169
2170 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
2171 NIG_REG_INGRESS_BMAC0_MEM);
2172
2173 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
2174 BIGMAC_REGISTER_TX_STAT_GTBYT */
2175 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2176 dmae->opcode = opcode;
2177 dmae->src_addr_lo = (mac_addr +
2178 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2179 dmae->src_addr_hi = 0;
2180 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2181 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2182 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
2183 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
2184 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2185 dmae->comp_addr_hi = 0;
2186 dmae->comp_val = 1;
2187
2188 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
2189 BIGMAC_REGISTER_RX_STAT_GRIPJ */
2190 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2191 dmae->opcode = opcode;
2192 dmae->src_addr_lo = (mac_addr +
2193 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2194 dmae->src_addr_hi = 0;
2195 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2196 offsetof(struct bmac_stats, rx_gr64));
2197 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2198 offsetof(struct bmac_stats, rx_gr64));
2199 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
2200 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
2201 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2202 dmae->comp_addr_hi = 0;
2203 dmae->comp_val = 1;
2204
c18487ee 2205 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
2206
2207 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
2208
2209 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
2210 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2211 dmae->opcode = opcode;
2212 dmae->src_addr_lo = (mac_addr +
2213 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
2214 dmae->src_addr_hi = 0;
2215 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
2216 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
2217 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
2218 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2219 dmae->comp_addr_hi = 0;
2220 dmae->comp_val = 1;
2221
2222 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
2223 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2224 dmae->opcode = opcode;
2225 dmae->src_addr_lo = (mac_addr +
2226 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
2227 dmae->src_addr_hi = 0;
2228 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2229 offsetof(struct emac_stats,
2230 rx_falsecarriererrors));
2231 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2232 offsetof(struct emac_stats,
2233 rx_falsecarriererrors));
2234 dmae->len = 1;
2235 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2236 dmae->comp_addr_hi = 0;
2237 dmae->comp_val = 1;
2238
2239 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
2240 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2241 dmae->opcode = opcode;
2242 dmae->src_addr_lo = (mac_addr +
2243 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
2244 dmae->src_addr_hi = 0;
2245 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
2246 offsetof(struct emac_stats,
2247 tx_ifhcoutoctets));
2248 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
2249 offsetof(struct emac_stats,
2250 tx_ifhcoutoctets));
2251 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
2252 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
2253 dmae->comp_addr_hi = 0;
2254 dmae->comp_val = 1;
2255 }
2256
2257 /* NIG */
2258 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
2259 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
2260 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
2261 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
2262#ifdef __BIG_ENDIAN
2263 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2264#else
2265 DMAE_CMD_ENDIANITY_DW_SWAP |
2266#endif
2267 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2268 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
2269 NIG_REG_STAT0_BRB_DISCARD) >> 2;
2270 dmae->src_addr_hi = 0;
2271 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig));
2272 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig));
2273 dmae->len = (sizeof(struct nig_stats) - 2*sizeof(u32)) >> 2;
2274 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig) +
2275 offsetof(struct nig_stats, done));
2276 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig) +
2277 offsetof(struct nig_stats, done));
2278 dmae->comp_val = 0xffffffff;
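	/* this 0xffffffff completion value written to nig_stats.done is the
	 * same marker bnx2x_update_storm_stats() checks before trusting the
	 * DMAE snapshot */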
2279}
2280
2281static void bnx2x_init_stats(struct bnx2x *bp)
2282{
2283 int port = bp->port;
2284
2285 bp->stats_state = STATS_STATE_DISABLE;
2286 bp->executer_idx = 0;
2287
2288 bp->old_brb_discard = REG_RD(bp,
2289 NIG_REG_STAT0_BRB_DISCARD + port*0x38);
2290
2291 memset(&bp->old_bmac, 0, sizeof(struct bmac_stats));
2292 memset(&bp->old_tclient, 0, sizeof(struct tstorm_per_client_stats));
2293 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
2294
2295 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port), 1);
2296 REG_WR(bp, BAR_XSTRORM_INTMEM +
2297 XSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2298
2299 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port), 1);
2300 REG_WR(bp, BAR_TSTRORM_INTMEM +
2301 TSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2302
2303 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port), 0);
2304 REG_WR(bp, BAR_CSTRORM_INTMEM +
2305 CSTORM_STATS_FLAGS_OFFSET(port) + 4, 0);
2306
2307 REG_WR(bp, BAR_XSTRORM_INTMEM +
2308 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2309 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2310 REG_WR(bp, BAR_XSTRORM_INTMEM +
2311 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2312 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2313
2314 REG_WR(bp, BAR_TSTRORM_INTMEM +
2315 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port),
2316 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
2317 REG_WR(bp, BAR_TSTRORM_INTMEM +
2318 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(port) + 4,
2319 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
2320}
2321
2322static void bnx2x_stop_stats(struct bnx2x *bp)
2323{
2324 might_sleep();
2325 if (bp->stats_state != STATS_STATE_DISABLE) {
2326 int timeout = 10;
2327
2328 bp->stats_state = STATS_STATE_STOP;
2329 DP(BNX2X_MSG_STATS, "stats_state - STOP\n");
2330
2331 while (bp->stats_state != STATS_STATE_DISABLE) {
2332 if (!timeout) {
c14423fe 2333 BNX2X_ERR("timeout waiting for stats stop\n");
a2fbb9ea
ET
2334 break;
2335 }
2336 timeout--;
2337 msleep(100);
2338 }
2339 }
2340 DP(BNX2X_MSG_STATS, "stats_state - DISABLE\n");
2341}
2342
2343/*
2344 * Statistics service functions
2345 */
2346
2347static void bnx2x_update_bmac_stats(struct bnx2x *bp)
2348{
2349 struct regp diff;
2350 struct regp sum;
2351 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac);
2352 struct bmac_stats *old = &bp->old_bmac;
2353 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2354
2355 sum.hi = 0;
2356 sum.lo = 0;
2357
2358 UPDATE_STAT64(tx_gtbyt.hi, total_bytes_transmitted_hi,
2359 tx_gtbyt.lo, total_bytes_transmitted_lo);
2360
2361 UPDATE_STAT64(tx_gtmca.hi, total_multicast_packets_transmitted_hi,
2362 tx_gtmca.lo, total_multicast_packets_transmitted_lo);
2363 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
2364
2365 UPDATE_STAT64(tx_gtgca.hi, total_broadcast_packets_transmitted_hi,
2366 tx_gtgca.lo, total_broadcast_packets_transmitted_lo);
2367 ADD_64(sum.hi, diff.hi, sum.lo, diff.lo);
2368
2369 UPDATE_STAT64(tx_gtpkt.hi, total_unicast_packets_transmitted_hi,
2370 tx_gtpkt.lo, total_unicast_packets_transmitted_lo);
2371 SUB_64(estats->total_unicast_packets_transmitted_hi, sum.hi,
2372 estats->total_unicast_packets_transmitted_lo, sum.lo);
2373
2374 UPDATE_STAT(tx_gtxpf.lo, pause_xoff_frames_transmitted);
2375 UPDATE_STAT(tx_gt64.lo, frames_transmitted_64_bytes);
2376 UPDATE_STAT(tx_gt127.lo, frames_transmitted_65_127_bytes);
2377 UPDATE_STAT(tx_gt255.lo, frames_transmitted_128_255_bytes);
2378 UPDATE_STAT(tx_gt511.lo, frames_transmitted_256_511_bytes);
2379 UPDATE_STAT(tx_gt1023.lo, frames_transmitted_512_1023_bytes);
2380 UPDATE_STAT(tx_gt1518.lo, frames_transmitted_1024_1522_bytes);
2381 UPDATE_STAT(tx_gt2047.lo, frames_transmitted_1523_9022_bytes);
2382 UPDATE_STAT(tx_gt4095.lo, frames_transmitted_1523_9022_bytes);
2383 UPDATE_STAT(tx_gt9216.lo, frames_transmitted_1523_9022_bytes);
2384 UPDATE_STAT(tx_gt16383.lo, frames_transmitted_1523_9022_bytes);
2385
2386 UPDATE_STAT(rx_grfcs.lo, crc_receive_errors);
2387 UPDATE_STAT(rx_grund.lo, runt_packets_received);
2388 UPDATE_STAT(rx_grovr.lo, stat_Dot3statsFramesTooLong);
2389 UPDATE_STAT(rx_grxpf.lo, pause_xoff_frames_received);
2390 UPDATE_STAT(rx_grxcf.lo, control_frames_received);
2391 /* UPDATE_STAT(rx_grxpf.lo, control_frames_received); */
2392 UPDATE_STAT(rx_grfrg.lo, error_runt_packets_received);
2393 UPDATE_STAT(rx_grjbr.lo, error_jabber_packets_received);
2394
2395 UPDATE_STAT64(rx_grerb.hi, stat_IfHCInBadOctets_hi,
2396 rx_grerb.lo, stat_IfHCInBadOctets_lo);
2397 UPDATE_STAT64(tx_gtufl.hi, stat_IfHCOutBadOctets_hi,
2398 tx_gtufl.lo, stat_IfHCOutBadOctets_lo);
2399 UPDATE_STAT(tx_gterr.lo, stat_Dot3statsInternalMacTransmitErrors);
2400 /* UPDATE_STAT(rx_grxpf.lo, stat_XoffStateEntered); */
2401 estats->stat_XoffStateEntered = estats->pause_xoff_frames_received;
2402}
2403
2404static void bnx2x_update_emac_stats(struct bnx2x *bp)
2405{
2406 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac);
2407 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2408
2409 UPDATE_EXTEND_STAT(tx_ifhcoutoctets, total_bytes_transmitted_hi,
2410 total_bytes_transmitted_lo);
2411 UPDATE_EXTEND_STAT(tx_ifhcoutucastpkts,
2412 total_unicast_packets_transmitted_hi,
2413 total_unicast_packets_transmitted_lo);
2414 UPDATE_EXTEND_STAT(tx_ifhcoutmulticastpkts,
2415 total_multicast_packets_transmitted_hi,
2416 total_multicast_packets_transmitted_lo);
2417 UPDATE_EXTEND_STAT(tx_ifhcoutbroadcastpkts,
2418 total_broadcast_packets_transmitted_hi,
2419 total_broadcast_packets_transmitted_lo);
2420
2421 estats->pause_xon_frames_transmitted += new->tx_outxonsent;
2422 estats->pause_xoff_frames_transmitted += new->tx_outxoffsent;
2423 estats->single_collision_transmit_frames +=
2424 new->tx_dot3statssinglecollisionframes;
2425 estats->multiple_collision_transmit_frames +=
2426 new->tx_dot3statsmultiplecollisionframes;
2427 estats->late_collision_frames += new->tx_dot3statslatecollisions;
2428 estats->excessive_collision_frames +=
2429 new->tx_dot3statsexcessivecollisions;
2430 estats->frames_transmitted_64_bytes += new->tx_etherstatspkts64octets;
2431 estats->frames_transmitted_65_127_bytes +=
2432 new->tx_etherstatspkts65octetsto127octets;
2433 estats->frames_transmitted_128_255_bytes +=
2434 new->tx_etherstatspkts128octetsto255octets;
2435 estats->frames_transmitted_256_511_bytes +=
2436 new->tx_etherstatspkts256octetsto511octets;
2437 estats->frames_transmitted_512_1023_bytes +=
2438 new->tx_etherstatspkts512octetsto1023octets;
2439 estats->frames_transmitted_1024_1522_bytes +=
2440 new->tx_etherstatspkts1024octetsto1522octet;
2441 estats->frames_transmitted_1523_9022_bytes +=
2442 new->tx_etherstatspktsover1522octets;
2443
2444 estats->crc_receive_errors += new->rx_dot3statsfcserrors;
2445 estats->alignment_errors += new->rx_dot3statsalignmenterrors;
2446 estats->false_carrier_detections += new->rx_falsecarriererrors;
2447 estats->runt_packets_received += new->rx_etherstatsundersizepkts;
2448 estats->stat_Dot3statsFramesTooLong += new->rx_dot3statsframestoolong;
2449 estats->pause_xon_frames_received += new->rx_xonpauseframesreceived;
2450 estats->pause_xoff_frames_received += new->rx_xoffpauseframesreceived;
2451 estats->control_frames_received += new->rx_maccontrolframesreceived;
2452 estats->error_runt_packets_received += new->rx_etherstatsfragments;
2453 estats->error_jabber_packets_received += new->rx_etherstatsjabbers;
2454
2455 UPDATE_EXTEND_STAT(rx_ifhcinbadoctets, stat_IfHCInBadOctets_hi,
2456 stat_IfHCInBadOctets_lo);
2457 UPDATE_EXTEND_STAT(tx_ifhcoutbadoctets, stat_IfHCOutBadOctets_hi,
2458 stat_IfHCOutBadOctets_lo);
2459 estats->stat_Dot3statsInternalMacTransmitErrors +=
2460 new->tx_dot3statsinternalmactransmiterrors;
2461 estats->stat_Dot3StatsCarrierSenseErrors +=
2462 new->rx_dot3statscarriersenseerrors;
2463 estats->stat_Dot3StatsDeferredTransmissions +=
2464 new->tx_dot3statsdeferredtransmissions;
2465 estats->stat_FlowControlDone += new->tx_flowcontroldone;
2466 estats->stat_XoffStateEntered += new->rx_xoffstateentered;
2467}
2468
2469static int bnx2x_update_storm_stats(struct bnx2x *bp)
2470{
2471 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
2472 struct tstorm_common_stats *tstats = &stats->tstorm_common;
2473 struct tstorm_per_client_stats *tclient =
2474 &tstats->client_statistics[0];
2475 struct tstorm_per_client_stats *old_tclient = &bp->old_tclient;
2476 struct xstorm_common_stats *xstats = &stats->xstorm_common;
2477 struct nig_stats *nstats = bnx2x_sp(bp, nig);
2478 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2479 u32 diff;
2480
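	/* the 'done' words act as completion markers: the DMAE engine and
	 * the storm processors write 0xffffffff once a fresh snapshot has
	 * landed, and the driver clears them again at the end of this
	 * function to arm the next query */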
2481 /* are DMAE stats valid? */
2482 if (nstats->done != 0xffffffff) {
2483 DP(BNX2X_MSG_STATS, "stats not updated by dmae\n");
2484 return -1;
2485 }
2486
2487 /* are storm stats valid? */
2488 if (tstats->done.hi != 0xffffffff) {
2489 DP(BNX2X_MSG_STATS, "stats not updated by tstorm\n");
2490 return -2;
2491 }
2492 if (xstats->done.hi != 0xffffffff) {
2493 DP(BNX2X_MSG_STATS, "stats not updated by xstorm\n");
2494 return -3;
2495 }
2496
2497 estats->total_bytes_received_hi =
2498 estats->valid_bytes_received_hi =
2499 le32_to_cpu(tclient->total_rcv_bytes.hi);
2500 estats->total_bytes_received_lo =
2501 estats->valid_bytes_received_lo =
2502 le32_to_cpu(tclient->total_rcv_bytes.lo);
2503 ADD_64(estats->total_bytes_received_hi,
2504 le32_to_cpu(tclient->rcv_error_bytes.hi),
2505 estats->total_bytes_received_lo,
2506 le32_to_cpu(tclient->rcv_error_bytes.lo));
2507
2508 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
2509 total_unicast_packets_received_hi,
2510 total_unicast_packets_received_lo);
2511 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
2512 total_multicast_packets_received_hi,
2513 total_multicast_packets_received_lo);
2514 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
2515 total_broadcast_packets_received_hi,
2516 total_broadcast_packets_received_lo);
2517
2518 estats->frames_received_64_bytes = MAC_STX_NA;
2519 estats->frames_received_65_127_bytes = MAC_STX_NA;
2520 estats->frames_received_128_255_bytes = MAC_STX_NA;
2521 estats->frames_received_256_511_bytes = MAC_STX_NA;
2522 estats->frames_received_512_1023_bytes = MAC_STX_NA;
2523 estats->frames_received_1024_1522_bytes = MAC_STX_NA;
2524 estats->frames_received_1523_9022_bytes = MAC_STX_NA;
2525
2526 estats->x_total_sent_bytes_hi =
2527 le32_to_cpu(xstats->total_sent_bytes.hi);
2528 estats->x_total_sent_bytes_lo =
2529 le32_to_cpu(xstats->total_sent_bytes.lo);
2530 estats->x_total_sent_pkts = le32_to_cpu(xstats->total_sent_pkts);
2531
2532 estats->t_rcv_unicast_bytes_hi =
2533 le32_to_cpu(tclient->rcv_unicast_bytes.hi);
2534 estats->t_rcv_unicast_bytes_lo =
2535 le32_to_cpu(tclient->rcv_unicast_bytes.lo);
2536 estats->t_rcv_broadcast_bytes_hi =
2537 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
2538 estats->t_rcv_broadcast_bytes_lo =
2539 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
2540 estats->t_rcv_multicast_bytes_hi =
2541 le32_to_cpu(tclient->rcv_multicast_bytes.hi);
2542 estats->t_rcv_multicast_bytes_lo =
2543 le32_to_cpu(tclient->rcv_multicast_bytes.lo);
2544 estats->t_total_rcv_pkt = le32_to_cpu(tclient->total_rcv_pkts);
2545
2546 estats->checksum_discard = le32_to_cpu(tclient->checksum_discard);
2547 estats->packets_too_big_discard =
2548 le32_to_cpu(tclient->packets_too_big_discard);
2549 estats->jabber_packets_received = estats->packets_too_big_discard +
2550 estats->stat_Dot3statsFramesTooLong;
2551 estats->no_buff_discard = le32_to_cpu(tclient->no_buff_discard);
2552 estats->ttl0_discard = le32_to_cpu(tclient->ttl0_discard);
2553 estats->mac_discard = le32_to_cpu(tclient->mac_discard);
2554 estats->mac_filter_discard = le32_to_cpu(tstats->mac_filter_discard);
2555 estats->xxoverflow_discard = le32_to_cpu(tstats->xxoverflow_discard);
2556 estats->brb_truncate_discard =
2557 le32_to_cpu(tstats->brb_truncate_discard);
2558
2559 estats->brb_discard += nstats->brb_discard - bp->old_brb_discard;
2560 bp->old_brb_discard = nstats->brb_discard;
2561
2562 estats->brb_packet = nstats->brb_packet;
2563 estats->brb_truncate = nstats->brb_truncate;
2564 estats->flow_ctrl_discard = nstats->flow_ctrl_discard;
2565 estats->flow_ctrl_octets = nstats->flow_ctrl_octets;
2566 estats->flow_ctrl_packet = nstats->flow_ctrl_packet;
2567 estats->mng_discard = nstats->mng_discard;
2568 estats->mng_octet_inp = nstats->mng_octet_inp;
2569 estats->mng_octet_out = nstats->mng_octet_out;
2570 estats->mng_packet_inp = nstats->mng_packet_inp;
2571 estats->mng_packet_out = nstats->mng_packet_out;
2572 estats->pbf_octets = nstats->pbf_octets;
2573 estats->pbf_packet = nstats->pbf_packet;
2574 estats->safc_inp = nstats->safc_inp;
2575
2576 xstats->done.hi = 0;
2577 tstats->done.hi = 0;
2578 nstats->done = 0;
2579
2580 return 0;
2581}
2582
2583static void bnx2x_update_net_stats(struct bnx2x *bp)
2584{
2585 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2586 struct net_device_stats *nstats = &bp->dev->stats;
2587
2588 nstats->rx_packets =
2589 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
2590 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
2591 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
2592
2593 nstats->tx_packets =
2594 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
2595 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
2596 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
2597
2598 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
2599
0e39e645 2600 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 2601
0e39e645 2602 nstats->rx_dropped = estats->checksum_discard + estats->mac_discard;
a2fbb9ea
ET
2603 nstats->tx_dropped = 0;
2604
2605 nstats->multicast =
2606 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi);
2607
0e39e645
ET
2608 nstats->collisions = estats->single_collision_transmit_frames +
2609 estats->multiple_collision_transmit_frames +
2610 estats->late_collision_frames +
2611 estats->excessive_collision_frames;
a2fbb9ea
ET
2612
2613 nstats->rx_length_errors = estats->runt_packets_received +
2614 estats->jabber_packets_received;
0e39e645
ET
2615 nstats->rx_over_errors = estats->brb_discard +
2616 estats->brb_truncate_discard;
a2fbb9ea
ET
2617 nstats->rx_crc_errors = estats->crc_receive_errors;
2618 nstats->rx_frame_errors = estats->alignment_errors;
0e39e645 2619 nstats->rx_fifo_errors = estats->no_buff_discard;
a2fbb9ea
ET
2620 nstats->rx_missed_errors = estats->xxoverflow_discard;
2621
2622 nstats->rx_errors = nstats->rx_length_errors +
2623 nstats->rx_over_errors +
2624 nstats->rx_crc_errors +
2625 nstats->rx_frame_errors +
0e39e645
ET
2626 nstats->rx_fifo_errors +
2627 nstats->rx_missed_errors;
a2fbb9ea
ET
2628
2629 nstats->tx_aborted_errors = estats->late_collision_frames +
0e39e645 2630 estats->excessive_collision_frames;
a2fbb9ea
ET
2631 nstats->tx_carrier_errors = estats->false_carrier_detections;
2632 nstats->tx_fifo_errors = 0;
2633 nstats->tx_heartbeat_errors = 0;
2634 nstats->tx_window_errors = 0;
2635
2636 nstats->tx_errors = nstats->tx_aborted_errors +
2637 nstats->tx_carrier_errors;
2638
2639 estats->mac_stx_start = ++estats->mac_stx_end;
2640}
2641
2642static void bnx2x_update_stats(struct bnx2x *bp)
2643{
2644 int i;
2645
2646 if (!bnx2x_update_storm_stats(bp)) {
2647
c18487ee 2648 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
2649 bnx2x_update_bmac_stats(bp);
2650
c18487ee 2651 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
2652 bnx2x_update_emac_stats(bp);
2653
2654 } else { /* unreached */
2655 BNX2X_ERR("no MAC active\n");
2656 return;
2657 }
2658
2659 bnx2x_update_net_stats(bp);
2660 }
2661
2662 if (bp->msglevel & NETIF_MSG_TIMER) {
2663 struct bnx2x_eth_stats *estats = bnx2x_sp(bp, eth_stats);
2664 struct net_device_stats *nstats = &bp->dev->stats;
2665
2666 printk(KERN_DEBUG "%s:\n", bp->dev->name);
2667 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
2668 " tx pkt (%lx)\n",
2669 bnx2x_tx_avail(bp->fp),
2670 *bp->fp->tx_cons_sb, nstats->tx_packets);
2671 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
2672 " rx pkt (%lx)\n",
2673 (u16)(*bp->fp->rx_cons_sb - bp->fp->rx_comp_cons),
2674 *bp->fp->rx_cons_sb, nstats->rx_packets);
2675 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u\n",
2676 netif_queue_stopped(bp->dev) ? "Xoff" : "Xon",
2677 estats->driver_xoff, estats->brb_discard);
2678 printk(KERN_DEBUG "tstats: checksum_discard %u "
2679 "packets_too_big_discard %u no_buff_discard %u "
2680 "mac_discard %u mac_filter_discard %u "
2681 "xxovrflow_discard %u brb_truncate_discard %u "
2682 "ttl0_discard %u\n",
2683 estats->checksum_discard,
2684 estats->packets_too_big_discard,
2685 estats->no_buff_discard, estats->mac_discard,
2686 estats->mac_filter_discard, estats->xxoverflow_discard,
2687 estats->brb_truncate_discard, estats->ttl0_discard);
2688
2689 for_each_queue(bp, i) {
2690 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
2691 bnx2x_fp(bp, i, tx_pkt),
2692 bnx2x_fp(bp, i, rx_pkt),
2693 bnx2x_fp(bp, i, rx_calls));
2694 }
2695 }
2696
2697 if (bp->state != BNX2X_STATE_OPEN) {
2698 DP(BNX2X_MSG_STATS, "state is %x, returning\n", bp->state);
2699 return;
2700 }
2701
2702#ifdef BNX2X_STOP_ON_ERROR
2703 if (unlikely(bp->panic))
2704 return;
2705#endif
2706
2707 /* loader: copy the DMAE chain built in bnx2x_init_mac_stats() into the DMAE command memory and start it */
2708 if (bp->executer_idx) {
2709 struct dmae_command *dmae = &bp->dmae;
2710 int port = bp->port;
2711 int loader_idx = port * 8;
2712
2713 memset(dmae, 0, sizeof(struct dmae_command));
2714
2715 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
2716 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
2717 DMAE_CMD_DST_RESET |
2718#ifdef __BIG_ENDIAN
2719 DMAE_CMD_ENDIANITY_B_DW_SWAP |
2720#else
2721 DMAE_CMD_ENDIANITY_DW_SWAP |
2722#endif
2723 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0));
2724 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
2725 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
2726 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
2727 sizeof(struct dmae_command) *
2728 (loader_idx + 1)) >> 2;
2729 dmae->dst_addr_hi = 0;
2730 dmae->len = sizeof(struct dmae_command) >> 2;
2731 dmae->len--; /* !!! for A0/1 only */
2732 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
2733 dmae->comp_addr_hi = 0;
2734 dmae->comp_val = 1;
2735
2736 bnx2x_post_dmae(bp, dmae, loader_idx);
2737 }
2738
2739 if (bp->stats_state != STATS_STATE_ENABLE) {
2740 bp->stats_state = STATS_STATE_DISABLE;
2741 return;
2742 }
2743
2744 if (bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0, 0, 0, 0) == 0) {
2745 /* the stats ramrod has its own slot on the spe */
2746 bp->spq_left++;
2747 bp->stat_pending = 1;
2748 }
2749}
2750
2751static void bnx2x_timer(unsigned long data)
2752{
2753 struct bnx2x *bp = (struct bnx2x *) data;
2754
2755 if (!netif_running(bp->dev))
2756 return;
2757
2758 if (atomic_read(&bp->intr_sem) != 0)
f1410647 2759 goto timer_restart;
a2fbb9ea
ET
2760
2761 if (poll) {
2762 struct bnx2x_fastpath *fp = &bp->fp[0];
2763 int rc;
2764
2765 bnx2x_tx_int(fp, 1000);
2766 rc = bnx2x_rx_int(fp, 1000);
2767 }
2768
f1410647 2769 if (!nomcp) {
a2fbb9ea
ET
2770 int port = bp->port;
2771 u32 drv_pulse;
2772 u32 mcp_pulse;
2773
2774 ++bp->fw_drv_pulse_wr_seq;
2775 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2776 /* TBD - add SYSTEM_TIME */
2777 drv_pulse = bp->fw_drv_pulse_wr_seq;
f1410647 2778 SHMEM_WR(bp, func_mb[port].drv_pulse_mb, drv_pulse);
a2fbb9ea 2779
f1410647 2780 mcp_pulse = (SHMEM_RD(bp, func_mb[port].mcp_pulse_mb) &
a2fbb9ea
ET
2781 MCP_PULSE_SEQ_MASK);
2782 /* The delta between driver pulse and mcp response
2783 * should be 1 (before mcp response) or 0 (after mcp response)
2784 */
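		/* e.g. drv_pulse 0x23 with mcp_pulse 0x22 is fine (the MCP has
		 * not yet echoed the latest pulse); 0x23 against 0x20 means
		 * heartbeats were lost */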
2785 if ((drv_pulse != mcp_pulse) &&
2786 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2787 /* someone lost a heartbeat... */
2788 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2789 drv_pulse, mcp_pulse);
2790 }
2791 }
2792
2793 if (bp->stats_state == STATS_STATE_DISABLE)
f1410647 2794 goto timer_restart;
a2fbb9ea
ET
2795
2796 bnx2x_update_stats(bp);
2797
f1410647 2798timer_restart:
a2fbb9ea
ET
2799 mod_timer(&bp->timer, jiffies + bp->current_interval);
2800}
2801
2802/* end of Statistics */
2803
2804/* nic init */
2805
2806/*
2807 * nic init service functions
2808 */
2809
2810static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2811 dma_addr_t mapping, int id)
2812{
2813 int port = bp->port;
2814 u64 section;
2815 int index;
2816
2817 /* USTORM */
2818 section = ((u64)mapping) + offsetof(struct host_status_block,
2819 u_status_block);
2820 sb->u_status_block.status_block_id = id;
2821
2822 REG_WR(bp, BAR_USTRORM_INTMEM +
2823 USTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
2824 REG_WR(bp, BAR_USTRORM_INTMEM +
2825 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
2826 U64_HI(section));
2827
2828 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2829 REG_WR16(bp, BAR_USTRORM_INTMEM +
2830 USTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
2831
2832 /* CSTORM */
2833 section = ((u64)mapping) + offsetof(struct host_status_block,
2834 c_status_block);
2835 sb->c_status_block.status_block_id = id;
2836
2837 REG_WR(bp, BAR_CSTRORM_INTMEM +
2838 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id), U64_LO(section));
2839 REG_WR(bp, BAR_CSTRORM_INTMEM +
2840 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, id)) + 4),
2841 U64_HI(section));
2842
2843 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2844 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2845 CSTORM_SB_HC_DISABLE_OFFSET(port, id, index), 0x1);
2846
2847 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2848}
2849
2850static void bnx2x_init_def_sb(struct bnx2x *bp,
2851 struct host_def_status_block *def_sb,
2852 dma_addr_t mapping, int id)
2853{
2854 int port = bp->port;
2855 int index, val, reg_offset;
2856 u64 section;
2857
2858 /* ATTN */
2859 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2860 atten_status_block);
2861 def_sb->atten_status_block.status_block_id = id;
2862
49d66772
ET
2863 bp->def_att_idx = 0;
2864 bp->attn_state = 0;
2865
a2fbb9ea
ET
2866 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2867 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2868
2869 for (index = 0; index < 3; index++) {
2870 bp->attn_group[index].sig[0] = REG_RD(bp,
2871 reg_offset + 0x10*index);
2872 bp->attn_group[index].sig[1] = REG_RD(bp,
2873 reg_offset + 0x4 + 0x10*index);
2874 bp->attn_group[index].sig[2] = REG_RD(bp,
2875 reg_offset + 0x8 + 0x10*index);
2876 bp->attn_group[index].sig[3] = REG_RD(bp,
2877 reg_offset + 0xc + 0x10*index);
2878 }
2879
2880 bp->aeu_mask = REG_RD(bp, (port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2881 MISC_REG_AEU_MASK_ATTN_FUNC_0));
2882
2883 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2884 HC_REG_ATTN_MSG0_ADDR_L);
2885
2886 REG_WR(bp, reg_offset, U64_LO(section));
2887 REG_WR(bp, reg_offset + 4, U64_HI(section));
2888
2889 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2890
2891 val = REG_RD(bp, reg_offset);
2892 val |= id;
2893 REG_WR(bp, reg_offset, val);
2894
2895 /* USTORM */
2896 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2897 u_def_status_block);
2898 def_sb->u_def_status_block.status_block_id = id;
2899
49d66772
ET
2900 bp->def_u_idx = 0;
2901
a2fbb9ea
ET
2902 REG_WR(bp, BAR_USTRORM_INTMEM +
2903 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2904 REG_WR(bp, BAR_USTRORM_INTMEM +
2905 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2906 U64_HI(section));
2907 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port),
2908 BNX2X_BTR);
2909
2910 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2911 REG_WR16(bp, BAR_USTRORM_INTMEM +
2912 USTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2913
2914 /* CSTORM */
2915 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2916 c_def_status_block);
2917 def_sb->c_def_status_block.status_block_id = id;
2918
49d66772
ET
2919 bp->def_c_idx = 0;
2920
a2fbb9ea
ET
2921 REG_WR(bp, BAR_CSTRORM_INTMEM +
2922 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2923 REG_WR(bp, BAR_CSTRORM_INTMEM +
2924 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2925 U64_HI(section));
2926 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port),
2927 BNX2X_BTR);
2928
2929 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2930 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2931 CSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2932
2933 /* TSTORM */
2934 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2935 t_def_status_block);
2936 def_sb->t_def_status_block.status_block_id = id;
2937
49d66772
ET
2938 bp->def_t_idx = 0;
2939
a2fbb9ea
ET
2940 REG_WR(bp, BAR_TSTRORM_INTMEM +
2941 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2942 REG_WR(bp, BAR_TSTRORM_INTMEM +
2943 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2944 U64_HI(section));
2945 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port),
2946 BNX2X_BTR);
2947
2948 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2949 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2950 TSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2951
2952 /* XSTORM */
2953 section = ((u64)mapping) + offsetof(struct host_def_status_block,
2954 x_def_status_block);
2955 def_sb->x_def_status_block.status_block_id = id;
2956
49d66772
ET
2957 bp->def_x_idx = 0;
2958
a2fbb9ea
ET
2959 REG_WR(bp, BAR_XSTRORM_INTMEM +
2960 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port), U64_LO(section));
2961 REG_WR(bp, BAR_XSTRORM_INTMEM +
2962 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(port)) + 4),
2963 U64_HI(section));
2964 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port),
2965 BNX2X_BTR);
2966
2967 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2968 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2969 XSTORM_DEF_SB_HC_DISABLE_OFFSET(port, index), 0x1);
2970
49d66772
ET
2971 bp->stat_pending = 0;
2972
a2fbb9ea
ET
2973 bnx2x_ack_sb(bp, id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2974}
2975
2976static void bnx2x_update_coalesce(struct bnx2x *bp)
2977{
2978 int port = bp->port;
2979 int i;
2980
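	/* editorial note: the /12 divisor below assumes the HC timeout
	 * field counts in 12-usec units, so the *_ticks_int values are
	 * given in usec; a zero tick value instead sets HC_DISABLE for
	 * that index */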
2981 for_each_queue(bp, i) {
2982
2983 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2984 REG_WR8(bp, BAR_USTRORM_INTMEM +
2985 USTORM_SB_HC_TIMEOUT_OFFSET(port, i,
2986 HC_INDEX_U_ETH_RX_CQ_CONS),
2987 bp->rx_ticks_int/12);
2988 REG_WR16(bp, BAR_USTRORM_INTMEM +
2989 USTORM_SB_HC_DISABLE_OFFSET(port, i,
2990 HC_INDEX_U_ETH_RX_CQ_CONS),
2991 bp->rx_ticks_int ? 0 : 1);
2992
2993 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2994 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2995 CSTORM_SB_HC_TIMEOUT_OFFSET(port, i,
2996 HC_INDEX_C_ETH_TX_CQ_CONS),
2997 bp->tx_ticks_int/12);
2998 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2999 CSTORM_SB_HC_DISABLE_OFFSET(port, i,
3000 HC_INDEX_C_ETH_TX_CQ_CONS),
3001 bp->tx_ticks_int ? 0 : 1);
3002 }
3003}
3004
3005static void bnx2x_init_rx_rings(struct bnx2x *bp)
3006{
3007 u16 ring_prod;
3008 int i, j;
3009 int port = bp->port;
3010
3011 bp->rx_buf_use_size = bp->dev->mtu;
3012
3013 bp->rx_buf_use_size += bp->rx_offset + ETH_OVREHEAD;
3014 bp->rx_buf_size = bp->rx_buf_use_size + 64;
3015
3016 for_each_queue(bp, j) {
3017 struct bnx2x_fastpath *fp = &bp->fp[j];
3018
3019 fp->rx_bd_cons = 0;
3020 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
3021
3022 for (i = 1; i <= NUM_RX_RINGS; i++) {
3023 struct eth_rx_bd *rx_bd;
3024
3025 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
3026 rx_bd->addr_hi =
3027 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
3028 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
3029 rx_bd->addr_lo =
3030 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
3031 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
3032
3033 }
3034
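		/* the writes above plant a next-page pointer at the tail of
		 * each RX descriptor page so the pages chain into one circular
		 * ring the hardware can follow; the RCQ pages below are linked
		 * the same way */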
3035 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
3036 struct eth_rx_cqe_next_page *nextpg;
3037
3038 nextpg = (struct eth_rx_cqe_next_page *)
3039 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
3040 nextpg->addr_hi =
3041 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
3042 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3043 nextpg->addr_lo =
3044 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
3045 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
3046 }
3047
3048 /* rx completion queue */
3049 fp->rx_comp_cons = ring_prod = 0;
3050
3051 for (i = 0; i < bp->rx_ring_size; i++) {
3052 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
3053 BNX2X_ERR("was only able to allocate "
3054 "%d rx skbs\n", i);
3055 break;
3056 }
3057 ring_prod = NEXT_RX_IDX(ring_prod);
3058 BUG_TRAP(ring_prod > i);
3059 }
3060
3061 fp->rx_bd_prod = fp->rx_comp_prod = ring_prod;
3062 fp->rx_pkt = fp->rx_calls = 0;
3063
c14423fe 3064 /* Warning! This will generate an interrupt (to the TSTORM) */
a2fbb9ea
ET
3065 /* must only be done when chip is initialized */
3066 REG_WR(bp, BAR_TSTRORM_INTMEM +
3067 TSTORM_RCQ_PROD_OFFSET(port, j), ring_prod);
3068 if (j != 0)
3069 continue;
3070
3071 REG_WR(bp, BAR_USTRORM_INTMEM +
3072 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port),
3073 U64_LO(fp->rx_comp_mapping));
3074 REG_WR(bp, BAR_USTRORM_INTMEM +
3075 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(port) + 4,
3076 U64_HI(fp->rx_comp_mapping));
3077 }
3078}
3079
3080static void bnx2x_init_tx_ring(struct bnx2x *bp)
3081{
3082 int i, j;
3083
3084 for_each_queue(bp, j) {
3085 struct bnx2x_fastpath *fp = &bp->fp[j];
3086
3087 for (i = 1; i <= NUM_TX_RINGS; i++) {
3088 struct eth_tx_bd *tx_bd =
3089 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
3090
3091 tx_bd->addr_hi =
3092 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
3093 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
3094 tx_bd->addr_lo =
3095 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
3096 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
3097 }
3098
3099 fp->tx_pkt_prod = 0;
3100 fp->tx_pkt_cons = 0;
3101 fp->tx_bd_prod = 0;
3102 fp->tx_bd_cons = 0;
3103 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
3104 fp->tx_pkt = 0;
3105 }
3106}
3107
3108static void bnx2x_init_sp_ring(struct bnx2x *bp)
3109{
3110 int port = bp->port;
3111
3112 spin_lock_init(&bp->spq_lock);
3113
3114 bp->spq_left = MAX_SPQ_PENDING;
3115 bp->spq_prod_idx = 0;
a2fbb9ea
ET
3116 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
3117 bp->spq_prod_bd = bp->spq;
3118 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
3119
3120 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port),
3121 U64_LO(bp->spq_mapping));
3122 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PAGE_BASE_OFFSET(port) + 4,
3123 U64_HI(bp->spq_mapping));
3124
3125 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(port),
3126 bp->spq_prod_idx);
3127}
3128
3129static void bnx2x_init_context(struct bnx2x *bp)
3130{
3131 int i;
3132
3133 for_each_queue(bp, i) {
3134 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
3135 struct bnx2x_fastpath *fp = &bp->fp[i];
3136
3137 context->xstorm_st_context.tx_bd_page_base_hi =
3138 U64_HI(fp->tx_desc_mapping);
3139 context->xstorm_st_context.tx_bd_page_base_lo =
3140 U64_LO(fp->tx_desc_mapping);
3141 context->xstorm_st_context.db_data_addr_hi =
3142 U64_HI(fp->tx_prods_mapping);
3143 context->xstorm_st_context.db_data_addr_lo =
3144 U64_LO(fp->tx_prods_mapping);
3145
3146 context->ustorm_st_context.rx_bd_page_base_hi =
3147 U64_HI(fp->rx_desc_mapping);
3148 context->ustorm_st_context.rx_bd_page_base_lo =
3149 U64_LO(fp->rx_desc_mapping);
3150 context->ustorm_st_context.status_block_id = i;
3151 context->ustorm_st_context.sb_index_number =
3152 HC_INDEX_U_ETH_RX_CQ_CONS;
3153 context->ustorm_st_context.rcq_base_address_hi =
3154 U64_HI(fp->rx_comp_mapping);
3155 context->ustorm_st_context.rcq_base_address_lo =
3156 U64_LO(fp->rx_comp_mapping);
3157 context->ustorm_st_context.flags =
3158 USTORM_ETH_ST_CONTEXT_ENABLE_MC_ALIGNMENT;
3159 context->ustorm_st_context.mc_alignment_size = 64;
3160 context->ustorm_st_context.num_rss = bp->num_queues;
3161
3162 context->cstorm_st_context.sb_index_number =
3163 HC_INDEX_C_ETH_TX_CQ_CONS;
3164 context->cstorm_st_context.status_block_id = i;
3165
3166 context->xstorm_ag_context.cdu_reserved =
3167 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3168 CDU_REGION_NUMBER_XCM_AG,
3169 ETH_CONNECTION_TYPE);
3170 context->ustorm_ag_context.cdu_usage =
3171 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3172 CDU_REGION_NUMBER_UCM_AG,
3173 ETH_CONNECTION_TYPE);
3174 }
3175}
3176
3177static void bnx2x_init_ind_table(struct bnx2x *bp)
3178{
3179 int port = bp->port;
3180 int i;
3181
3182 if (!is_multi(bp))
3183 return;
3184
3185 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3186 REG_WR8(bp, TSTORM_INDIRECTION_TABLE_OFFSET(port) + i,
3187 i % bp->num_queues);
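	/* e.g. with num_queues = 4 the table is filled 0,1,2,3,0,1,2,3,...
	 * spreading RSS hash results round-robin across the RX queues */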
3188
3189 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3190}
3191
49d66772
ET
3192static void bnx2x_set_client_config(struct bnx2x *bp)
3193{
3194#ifdef BCM_VLAN
3195 int mode = bp->rx_mode;
3196#endif
3197 int i, port = bp->port;
3198 struct tstorm_eth_client_config tstorm_client = {0};
3199
3200 tstorm_client.mtu = bp->dev->mtu;
3201 tstorm_client.statistics_counter_id = 0;
3202 tstorm_client.config_flags =
3203 TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE;
3204#ifdef BCM_VLAN
3205 if (mode && bp->vlgrp) {
3206 tstorm_client.config_flags |=
3207 TSTORM_ETH_CLIENT_CONFIG_VLAN_REMOVAL_ENABLE;
3208 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3209 }
3210#endif
3211 if (mode != BNX2X_RX_MODE_PROMISC)
3212 tstorm_client.drop_flags =
3213 TSTORM_ETH_CLIENT_CONFIG_DROP_MAC_ERR;
3214
3215 for_each_queue(bp, i) {
3216 REG_WR(bp, BAR_TSTRORM_INTMEM +
3217 TSTORM_CLIENT_CONFIG_OFFSET(port, i),
3218 ((u32 *)&tstorm_client)[0]);
3219 REG_WR(bp, BAR_TSTRORM_INTMEM +
3220 TSTORM_CLIENT_CONFIG_OFFSET(port, i) + 4,
3221 ((u32 *)&tstorm_client)[1]);
3222 }
3223
3224/* DP(NETIF_MSG_IFUP, "tstorm_client: 0x%08x 0x%08x\n",
3225 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]); */
3226}
3227
a2fbb9ea
ET
3228static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3229{
3230 int mode = bp->rx_mode;
3231 int port = bp->port;
3232 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3233 int i;
3234
3235 DP(NETIF_MSG_RX_STATUS, "rx mode is %d\n", mode);
3236
3237 switch (mode) {
3238 case BNX2X_RX_MODE_NONE: /* no Rx */
3239 tstorm_mac_filter.ucast_drop_all = 1;
3240 tstorm_mac_filter.mcast_drop_all = 1;
3241 tstorm_mac_filter.bcast_drop_all = 1;
3242 break;
3243 case BNX2X_RX_MODE_NORMAL:
3244 tstorm_mac_filter.bcast_accept_all = 1;
3245 break;
3246 case BNX2X_RX_MODE_ALLMULTI:
3247 tstorm_mac_filter.mcast_accept_all = 1;
3248 tstorm_mac_filter.bcast_accept_all = 1;
3249 break;
3250 case BNX2X_RX_MODE_PROMISC:
3251 tstorm_mac_filter.ucast_accept_all = 1;
3252 tstorm_mac_filter.mcast_accept_all = 1;
3253 tstorm_mac_filter.bcast_accept_all = 1;
3254 break;
3255 default:
3256 BNX2X_ERR("bad rx mode (%d)\n", mode);
3257 }
3258
3259 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3260 REG_WR(bp, BAR_TSTRORM_INTMEM +
3261 TSTORM_MAC_FILTER_CONFIG_OFFSET(port) + i * 4,
3262 ((u32 *)&tstorm_mac_filter)[i]);
3263
3264/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3265 ((u32 *)&tstorm_mac_filter)[i]); */
3266 }
a2fbb9ea 3267
49d66772
ET
3268 if (mode != BNX2X_RX_MODE_NONE)
3269 bnx2x_set_client_config(bp);
a2fbb9ea
ET
3270}
3271
3272static void bnx2x_init_internal(struct bnx2x *bp)
3273{
3274 int port = bp->port;
3275 struct tstorm_eth_function_common_config tstorm_config = {0};
3276 struct stats_indication_flags stats_flags = {0};
a2fbb9ea
ET
3277
3278 if (is_multi(bp)) {
3279 tstorm_config.config_flags = MULTI_FLAGS;
3280 tstorm_config.rss_result_mask = MULTI_MASK;
3281 }
3282
3283 REG_WR(bp, BAR_TSTRORM_INTMEM +
3284 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(port),
3285 (*(u32 *)&tstorm_config));
3286
3287/* DP(NETIF_MSG_IFUP, "tstorm_config: 0x%08x\n",
3288 (*(u32 *)&tstorm_config)); */
3289
c14423fe 3290 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
3291 bnx2x_set_storm_rx_mode(bp);
3292
a2fbb9ea
ET
3293 stats_flags.collect_eth = cpu_to_le32(1);
3294
3295 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port),
3296 ((u32 *)&stats_flags)[0]);
3297 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(port) + 4,
3298 ((u32 *)&stats_flags)[1]);
3299
3300 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port),
3301 ((u32 *)&stats_flags)[0]);
3302 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(port) + 4,
3303 ((u32 *)&stats_flags)[1]);
3304
3305 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port),
3306 ((u32 *)&stats_flags)[0]);
3307 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(port) + 4,
3308 ((u32 *)&stats_flags)[1]);
3309
3310/* DP(NETIF_MSG_IFUP, "stats_flags: 0x%08x 0x%08x\n",
3311 ((u32 *)&stats_flags)[0], ((u32 *)&stats_flags)[1]); */
3312}
3313
3314static void bnx2x_nic_init(struct bnx2x *bp)
3315{
3316 int i;
3317
3318 for_each_queue(bp, i) {
3319 struct bnx2x_fastpath *fp = &bp->fp[i];
3320
3321 fp->state = BNX2X_FP_STATE_CLOSED;
3322 DP(NETIF_MSG_IFUP, "bnx2x_init_sb(%p,%p,%d);\n",
3323 bp, fp->status_blk, i);
3324 fp->index = i;
3325 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping, i);
3326 }
3327
3328 bnx2x_init_def_sb(bp, bp->def_status_blk,
3329 bp->def_status_blk_mapping, 0x10);
3330 bnx2x_update_coalesce(bp);
3331 bnx2x_init_rx_rings(bp);
3332 bnx2x_init_tx_ring(bp);
3333 bnx2x_init_sp_ring(bp);
3334 bnx2x_init_context(bp);
3335 bnx2x_init_internal(bp);
3336 bnx2x_init_stats(bp);
3337 bnx2x_init_ind_table(bp);
615f8fd9 3338 bnx2x_int_enable(bp);
a2fbb9ea
ET
3339
3340}
3341
3342/* end of nic init */
3343
3344/*
3345 * gzip service functions
3346 */
3347
3348static int bnx2x_gunzip_init(struct bnx2x *bp)
3349{
3350 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
3351 &bp->gunzip_mapping);
3352 if (bp->gunzip_buf == NULL)
3353 goto gunzip_nomem1;
3354
3355 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3356 if (bp->strm == NULL)
3357 goto gunzip_nomem2;
3358
3359 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3360 GFP_KERNEL);
3361 if (bp->strm->workspace == NULL)
3362 goto gunzip_nomem3;
3363
3364 return 0;
3365
3366gunzip_nomem3:
3367 kfree(bp->strm);
3368 bp->strm = NULL;
3369
3370gunzip_nomem2:
3371 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
3372 bp->gunzip_mapping);
3373 bp->gunzip_buf = NULL;
3374
3375gunzip_nomem1:
3376 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
3377 " uncompression\n", bp->dev->name);
3378 return -ENOMEM;
3379}
3380
3381static void bnx2x_gunzip_end(struct bnx2x *bp)
3382{
3383 kfree(bp->strm->workspace);
3384
3385 kfree(bp->strm);
3386 bp->strm = NULL;
3387
3388 if (bp->gunzip_buf) {
3389 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
3390 bp->gunzip_mapping);
3391 bp->gunzip_buf = NULL;
3392 }
3393}
3394
3395static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
3396{
3397 int n, rc;
3398
3399 /* check gzip header */
3400 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
3401 return -EINVAL;
3402
3403 n = 10;
3404
3405#define FNAME 0x8
3406
3407 if (zbuf[3] & FNAME)
3408 while ((zbuf[n++] != 0) && (n < len));
3409
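	/* per RFC 1952 a gzip member starts with a 10-byte fixed header
	 * (magic 0x1f 0x8b, method, flags, mtime, xfl, os); when the FNAME
	 * flag (bit 3) is set, a NUL-terminated file name follows, which
	 * the loop above skips before the raw deflate payload is handed to
	 * zlib_inflateInit2(-MAX_WBITS) below */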
3410 bp->strm->next_in = zbuf + n;
3411 bp->strm->avail_in = len - n;
3412 bp->strm->next_out = bp->gunzip_buf;
3413 bp->strm->avail_out = FW_BUF_SIZE;
3414
3415 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3416 if (rc != Z_OK)
3417 return rc;
3418
3419 rc = zlib_inflate(bp->strm, Z_FINISH);
3420 if ((rc != Z_OK) && (rc != Z_STREAM_END))
3421 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
3422 bp->dev->name, bp->strm->msg);
3423
3424 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3425 if (bp->gunzip_outlen & 0x3)
3426 printk(KERN_ERR PFX "%s: Firmware decompression error:"
3427 " gunzip_outlen (%d) not aligned\n",
3428 bp->dev->name, bp->gunzip_outlen);
3429 bp->gunzip_outlen >>= 2;
3430
3431 zlib_inflateEnd(bp->strm);
3432
3433 if (rc == Z_STREAM_END)
3434 return 0;
3435
3436 return rc;
3437}
3438
3439/* nic load/unload */
3440
3441/*
3442 * general service functions
3443 */
3444
3445/* send a NIG loopback debug packet */
3446static void bnx2x_lb_pckt(struct bnx2x *bp)
3447{
3448#ifdef USE_DMAE
3449 u32 wb_write[3];
3450#endif
3451
3452 /* Ethernet source and destination addresses */
3453#ifdef USE_DMAE
3454 wb_write[0] = 0x55555555;
3455 wb_write[1] = 0x55555555;
3456 wb_write[2] = 0x20; /* SOP */
3457 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3458#else
3459 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x55555555);
3460 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
3461 /* SOP */
3462 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x20);
3463#endif
3464
3465 /* NON-IP protocol */
3466#ifdef USE_DMAE
3467 wb_write[0] = 0x09000000;
3468 wb_write[1] = 0x55555555;
3469 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
3470 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3471#else
3472 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB, 0x09000000);
3473 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 4, 0x55555555);
3474 /* EOP, eop_bvalid = 0 */
3475 REG_WR_IND(bp, NIG_REG_DEBUG_PACKET_LB + 8, 0x10);
3476#endif
3477}
3478
3479 /* some of the internal memories
3480 * are not directly readable from the driver;
3481 * to test them we send debug packets
3482 */
3483static int bnx2x_int_mem_test(struct bnx2x *bp)
3484{
3485 int factor;
3486 int count, i;
3487 u32 val = 0;
3488
ad8d3948 3489 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 3490 factor = 120;
ad8d3948
EG
3491 else if (CHIP_REV_IS_EMUL(bp))
3492 factor = 200;
3493 else
a2fbb9ea 3494 factor = 1;
a2fbb9ea
ET
3495
3496 DP(NETIF_MSG_HW, "start part1\n");
3497
3498 /* Disable inputs of parser neighbor blocks */
3499 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3500 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3501 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3502 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
3503
3504 /* Write 0 to parser credits for CFC search request */
3505 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3506
3507 /* send Ethernet packet */
3508 bnx2x_lb_pckt(bp);
3509
3510 /* TODO: should the NIG statistics be reset here? */
3511 /* Wait until NIG register shows 1 packet of size 0x10 */
3512 count = 1000 * factor;
3513 while (count) {
3514#ifdef BNX2X_DMAE_RD
3515 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3516 val = *bnx2x_sp(bp, wb_data[0]);
3517#else
3518 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3519 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3520#endif
3521 if (val == 0x10)
3522 break;
3523
3524 msleep(10);
3525 count--;
3526 }
3527 if (val != 0x10) {
3528 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3529 return -1;
3530 }
3531
3532 /* Wait until PRS register shows 1 packet */
3533 count = 1000 * factor;
3534 while (count) {
3535 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3536
3537 if (val == 1)
3538 break;
3539
3540 msleep(10);
3541 count--;
3542 }
3543 if (val != 0x1) {
3544 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3545 return -2;
3546 }
3547
3548 /* Reset and init BRB, PRS */
3549 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x3);
3550 msleep(50);
3551 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x3);
3552 msleep(50);
3553 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3554 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3555
3556 DP(NETIF_MSG_HW, "part2\n");
3557
3558 /* Disable inputs of parser neighbor blocks */
3559 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3560 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3561 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3562 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x0);
3563
3564 /* Write 0 to parser credits for CFC search request */
3565 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3566
3567 /* send 10 Ethernet packets */
3568 for (i = 0; i < 10; i++)
3569 bnx2x_lb_pckt(bp);
3570
3571 /* Wait until NIG register shows 10 + 1
3572 packets totalling 11*0x10 = 0xb0 bytes */
3573 count = 1000 * factor;
3574 while (count) {
3575#ifdef BNX2X_DMAE_RD
3576 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3577 val = *bnx2x_sp(bp, wb_data[0]);
3578#else
3579 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3580 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3581#endif
3582 if (val == 0xb0)
3583 break;
3584
3585 msleep(10);
3586 count--;
3587 }
3588 if (val != 0xb0) {
3589 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
3590 return -3;
3591 }
3592
3593 /* Wait until PRS register shows 2 packets */
3594 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3595 if (val != 2)
3596 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3597
3598 /* Write 1 to parser credits for CFC search request */
3599 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3600
3601 /* Wait until PRS register shows 3 packets */
3602 msleep(10 * factor);
3604 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3605 if (val != 3)
3606 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3607
3608 /* clear NIG EOP FIFO */
3609 for (i = 0; i < 11; i++)
3610 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3611 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3612 if (val != 1) {
3613 BNX2X_ERR("clear of NIG failed\n");
3614 return -4;
3615 }
3616
3617 /* Reset and init BRB, PRS, NIG */
3618 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3619 msleep(50);
3620 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3621 msleep(50);
3622 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3623 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3624#ifndef BCM_ISCSI
3625 /* set NIC mode */
3626 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3627#endif
3628
3629 /* Enable inputs of parser neighbor blocks */
3630 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3631 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3632 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3633 NIG_WR(NIG_REG_PRS_REQ_IN_EN, 0x1);
3634
3635 DP(NETIF_MSG_HW, "done\n");
3636
3637 return 0; /* OK */
3638}
3639
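/* Unmask the interrupt sources of the blocks below so that errors are
 * reported through the AEU attention mechanism.  PXP2 (0x480000) and
 * PBF (0x18) keep a few sources masked; the commented-out SEM/MISC
 * writes are intentionally left for reference.
 */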
3640static void enable_blocks_attention(struct bnx2x *bp)
3641{
3642 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3643 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3644 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3645 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3646 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3647 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3648 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3649 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3650 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3651/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3652/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3653 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3654 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3655 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3656/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3657/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3658 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3659 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3660 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3661 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3662/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3663/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3664 REG_WR(bp, PXP2_REG_PXP2_INT_MASK, 0x480000);
3665 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3666 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3667 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3668/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3669/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3670 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3671 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3672/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3673 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
3674}
3675
3676static int bnx2x_function_init(struct bnx2x *bp, int mode)
3677{
3678 int func = bp->port;
3679 int port = func ? PORT1 : PORT0;
3680 u32 val, i;
3681#ifdef USE_DMAE
3682 u32 wb_write[2];
3683#endif
3684
3685 DP(BNX2X_MSG_MCP, "function is %d mode is %x\n", func, mode);
3686 if ((func != 0) && (func != 1)) {
3687 BNX2X_ERR("BAD function number (%d)\n", func);
3688 return -ENODEV;
3689 }
3690
3691 bnx2x_gunzip_init(bp);
3692
3693 if (mode & 0x1) { /* init common */
3694 DP(BNX2X_MSG_MCP, "starting common init func %d mode %x\n",
3695 func, mode);
3696 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
3697 0xffffffff);
619c714c 3698 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET,
f1410647 3699 0xfffc);
3700 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
3701
3702 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3703 msleep(30);
3704 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3705
3706 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
3707 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
3708
3709 bnx2x_init_pxp(bp);
3710
3711 if (CHIP_REV(bp) == CHIP_REV_Ax) {
3712 /* enable HW interrupt from PXP on USDM
3713 overflow bit 16 on INT_MASK_0 */
3714 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3715 }
3716
3717#ifdef __BIG_ENDIAN
3718 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3719 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3720 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3721 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3722 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3723 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 1);
3724
3725/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3726 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3727 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3728 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3729 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3730#endif
3731
3732#ifndef BCM_ISCSI
3733 /* set NIC mode */
3734 REG_WR(bp, PRS_REG_NIC_MODE, 1);
3735#endif
3736
3737 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 5);
3738#ifdef BCM_ISCSI
3739 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3740 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3741 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3742#endif
3743
3744 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
3745
3746 /* let the HW do its magic ... */
3747 msleep(100);
3748 /* finish PXP init
3749 (can be moved up if we want to use the DMAE) */
3750 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3751 if (val != 1) {
3752 BNX2X_ERR("PXP2 CFG failed\n");
3753 return -EBUSY;
3754 }
3755
3756 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3757 if (val != 1) {
3758 BNX2X_ERR("PXP2 RD_INIT failed\n");
3759 return -EBUSY;
3760 }
3761
3762 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3763 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3764
3765 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3766
3767 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
3768 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
3769 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
3770 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
3771
3772#ifdef BNX2X_DMAE_RD
3773 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
3774 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
3775 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
3776 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
3777#else
3778 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER);
3779 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 4);
3780 REG_RD(bp, XSEM_REG_PASSIVE_BUFFER + 8);
3781 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER);
3782 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 4);
3783 REG_RD(bp, CSEM_REG_PASSIVE_BUFFER + 8);
3784 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER);
3785 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 4);
3786 REG_RD(bp, TSEM_REG_PASSIVE_BUFFER + 8);
3787 REG_RD(bp, USEM_REG_PASSIVE_BUFFER);
3788 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 4);
3789 REG_RD(bp, USEM_REG_PASSIVE_BUFFER + 8);
3790#endif
3791 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
c14423fe 3792 /* soft reset pulse */
3793 REG_WR(bp, QM_REG_SOFT_RESET, 1);
3794 REG_WR(bp, QM_REG_SOFT_RESET, 0);
3795
3796#ifdef BCM_ISCSI
3797 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
3798#endif
3799 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
3800 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_BITS);
3801 if (CHIP_REV(bp) == CHIP_REV_Ax) {
3802 /* enable hw interrupt from doorbell Q */
3803 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3804 }
3805
3806 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
3807
3808 if (CHIP_REV_IS_SLOW(bp)) {
3809 /* fix for emulation and FPGA for no pause */
3810 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
3811 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
3812 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
3813 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
3814 }
3815
3816 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
3817
3818 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
3819 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
3820 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
3821 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
3822
3823 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
3824 STORM_INTMEM_SIZE_E1);
3825 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
3826 STORM_INTMEM_SIZE_E1);
3827 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
3828 STORM_INTMEM_SIZE_E1);
3829 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
3830 STORM_INTMEM_SIZE_E1);
3831
3832 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
3833 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
3834 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
3835 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
3836
3837 /* sync semi rtc */
3838 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3839 0x80000000);
3840 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
3841 0x80000000);
3842
3843 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
3844 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
3845 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
3846
3847 REG_WR(bp, SRC_REG_SOFT_RST, 1);
3848 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
3849 REG_WR(bp, i, 0xc0cac01a);
c14423fe 3850 /* TODO: replace with something meaningful */
3851 }
3852 /* SRCH COMMON comes here */
3853 REG_WR(bp, SRC_REG_SOFT_RST, 0);
3854
3855 if (sizeof(union cdu_context) != 1024) {
3856 /* we currently assume that a context is 1024 bytes */
3857 printk(KERN_ALERT PFX "please adjust the size of"
3858 " cdu_context(%ld)\n",
3859 (long)sizeof(union cdu_context));
3860 }
3861 val = (4 << 24) + (0 << 12) + 1024;
3862 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
3863 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
3864
3865 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
3866 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
3867
3868 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
3869 bnx2x_init_block(bp, MISC_AEU_COMMON_START,
3870 MISC_AEU_COMMON_END);
3871 /* RXPCS COMMON comes here */
3872 /* EMAC0 COMMON comes here */
3873 /* EMAC1 COMMON comes here */
3874 /* DBU COMMON comes here */
3875 /* DBG COMMON comes here */
3876 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
3877
3878 if (CHIP_REV_IS_SLOW(bp))
3879 msleep(200);
3880
3881 /* finish CFC init */
3882 val = REG_RD(bp, CFC_REG_LL_INIT_DONE);
3883 if (val != 1) {
3884 BNX2X_ERR("CFC LL_INIT failed\n");
3885 return -EBUSY;
3886 }
3887
3888 val = REG_RD(bp, CFC_REG_AC_INIT_DONE);
3889 if (val != 1) {
3890 BNX2X_ERR("CFC AC_INIT failed\n");
3891 return -EBUSY;
3892 }
3893
3894 val = REG_RD(bp, CFC_REG_CAM_INIT_DONE);
3895 if (val != 1) {
3896 BNX2X_ERR("CFC CAM_INIT failed\n");
3897 return -EBUSY;
3898 }
3899
3900 REG_WR(bp, CFC_REG_DEBUG0, 0);
3901
3902 /* read the NIG statistic to see if this is
3903 the first load since power-up */
3904#ifdef BNX2X_DMAE_RD
3905 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3906 val = *bnx2x_sp(bp, wb_data[0]);
3907#else
3908 val = REG_RD(bp, NIG_REG_STAT2_BRB_OCTET);
3909 REG_RD(bp, NIG_REG_STAT2_BRB_OCTET + 4);
3910#endif
3911 /* do internal memory self test */
3912 if ((val == 0) && bnx2x_int_mem_test(bp)) {
3913 BNX2X_ERR("internal mem selftest failed\n");
3914 return -EBUSY;
3915 }
3916
3917 /* clear PXP2 attentions */
3918 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR);
3919
3920 enable_blocks_attention(bp);
3921 /* enable_blocks_parity(bp); */
3922
3923 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
3924 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
3925 /* Fan failure is indicated by SPIO 5 */
3926 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3927 MISC_REGISTERS_SPIO_INPUT_HI_Z);
3928
3929 /* set to active low mode */
3930 val = REG_RD(bp, MISC_REG_SPIO_INT);
3931 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3932 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3933 REG_WR(bp, MISC_REG_SPIO_INT, val);
3934
3935 /* enable interrupt to signal the IGU */
3936 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3937 val |= (1 << MISC_REGISTERS_SPIO_5);
3938 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3939 break;
3940
3941 default:
3942 break;
3943 }
3944
3945 } /* end of common init */
3946
3947 /* per port init */
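/* Only the common block init above is conditional on the low bit of
 * 'mode'; the per-port initialization below always runs, using 'func'
 * to select the port 0 or port 1 register instances.
 */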
3948
3949 /* the phys address is shifted right 12 bits and a 1=valid bit
3950 is added at bit 52 (the 53rd bit);
3951 then since this is a wide register(TM)
3952 we split it into two 32 bit writes
3953 */
3954#define RQ_ONCHIP_AT_PORT_SIZE 384
3955#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
3956#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
3957#define PXP_ONE_ILT(x) ((x << 10) | x)
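/* Worked example: for a DMA address of 0x1234567000,
 * ONCHIP_ADDR1() = 0x01234567 (bits 12..43) and
 * ONCHIP_ADDR2() = 0x00100000 (valid bit 20 set, bits 44..63 zero).
 */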
3958
3959 DP(BNX2X_MSG_MCP, "starting per-function init, function %d\n", func);
3960
3961 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + func*4, 0);
3962
3963 /* Port PXP comes here */
3964 /* Port PXP2 comes here */
3965
3966 /* Offset is
3967 * Port0 0
3968 * Port1 384 */
3969 i = func * RQ_ONCHIP_AT_PORT_SIZE;
3970#ifdef USE_DMAE
3971 wb_write[0] = ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context));
3972 wb_write[1] = ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context));
3973 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3974#else
3975 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8,
3976 ONCHIP_ADDR1(bnx2x_sp_mapping(bp, context)));
3977 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4,
3978 ONCHIP_ADDR2(bnx2x_sp_mapping(bp, context)));
3979#endif
3980 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, PXP_ONE_ILT(i));
3981
3982#ifdef BCM_ISCSI
3983 /* Port0 1
3984 * Port1 385 */
3985 i++;
3986 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
3987 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
3988 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3989 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
3990
3991 /* Port0 2
3992 * Port1 386 */
3993 i++;
3994 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
3995 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
3996 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
3997 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
3998
3999 /* Port0 3
4000 * Port1 387 */
4001 i++;
4002 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
4003 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
4004 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
4005 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4006#endif
4007
4008 /* Port TCM comes here */
4009 /* Port UCM comes here */
4010 /* Port CCM comes here */
4011 bnx2x_init_block(bp, func ? XCM_PORT1_START : XCM_PORT0_START,
4012 func ? XCM_PORT1_END : XCM_PORT0_END);
4013
4014#ifdef USE_DMAE
4015 wb_write[0] = 0;
4016 wb_write[1] = 0;
4017#endif
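/* 32 queues per port: base addresses spaced 4KB apart, and the 64-bit
 * pointer-table entry of each queue cleared */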
4018 for (i = 0; i < 32; i++) {
4019 REG_WR(bp, QM_REG_BASEADDR + (func*32 + i)*4, 1024 * 4 * i);
4020#ifdef USE_DMAE
4021 REG_WR_DMAE(bp, QM_REG_PTRTBL + (func*32 + i)*8, wb_write, 2);
4022#else
4023 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8, 0);
4024 REG_WR_IND(bp, QM_REG_PTRTBL + (func*32 + i)*8 + 4, 0);
4025#endif
4026 }
4027 REG_WR(bp, QM_REG_CONNNUM_0 + func*4, 1024/16 - 1);
4028
4029 /* Port QM comes here */
4030
4031#ifdef BCM_ISCSI
4032 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
4033 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
4034
4035 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
4036 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
4037#endif
4038 /* Port DQ comes here */
4039 /* Port BRB1 comes here */
ad8d3948 4040 /* Port PRS comes here */
4041 /* Port TSDM comes here */
4042 /* Port CSDM comes here */
4043 /* Port USDM comes here */
4044 /* Port XSDM comes here */
4045 bnx2x_init_block(bp, func ? TSEM_PORT1_START : TSEM_PORT0_START,
4046 func ? TSEM_PORT1_END : TSEM_PORT0_END);
4047 bnx2x_init_block(bp, func ? USEM_PORT1_START : USEM_PORT0_START,
4048 func ? USEM_PORT1_END : USEM_PORT0_END);
4049 bnx2x_init_block(bp, func ? CSEM_PORT1_START : CSEM_PORT0_START,
4050 func ? CSEM_PORT1_END : CSEM_PORT0_END);
4051 bnx2x_init_block(bp, func ? XSEM_PORT1_START : XSEM_PORT0_START,
4052 func ? XSEM_PORT1_END : XSEM_PORT0_END);
4053 /* Port UPB comes here */
4054 /* Port XSDM comes here */
4055 bnx2x_init_block(bp, func ? PBF_PORT1_START : PBF_PORT0_START,
4056 func ? PBF_PORT1_END : PBF_PORT0_END);
4057
4058 /* configure PBF to work without PAUSE (MTU 9000) */
4059 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + func*4, 0);
4060
4061 /* update threshold */
4062 REG_WR(bp, PBF_REG_P0_ARB_THRSH + func*4, (9040/16));
4063 /* update init credit */
4064 REG_WR(bp, PBF_REG_P0_INIT_CRD + func*4, (9040/16) + 553 - 22);
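/* credits are in 16-byte units: the threshold covers a 9000-byte MTU
 * plus header overhead (9040 bytes); the extra 553 - 22 credit units
 * presumably account for internal buffering */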
4065
4066 /* probe changes */
4067 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 1);
4068 msleep(5);
4069 REG_WR(bp, PBF_REG_INIT_P0 + func*4, 0);
4070
4071#ifdef BCM_ISCSI
4072 /* tell the searcher where the T2 table is */
4073 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
4074
4075 wb_write[0] = U64_LO(bp->t2_mapping);
4076 wb_write[1] = U64_HI(bp->t2_mapping);
4077 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
4078 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
4079 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
4080 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
4081
4082 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
4083 /* Port SRCH comes here */
4084#endif
4085 /* Port CDU comes here */
4086 /* Port CFC comes here */
4087 bnx2x_init_block(bp, func ? HC_PORT1_START : HC_PORT0_START,
4088 func ? HC_PORT1_END : HC_PORT0_END);
4089 bnx2x_init_block(bp, func ? MISC_AEU_PORT1_START :
4090 MISC_AEU_PORT0_START,
4091 func ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
4092 /* Port PXPCS comes here */
4093 /* Port EMAC0 comes here */
4094 /* Port EMAC1 comes here */
4095 /* Port DBU comes here */
4096 /* Port DBG comes here */
4097 bnx2x_init_block(bp, func ? NIG_PORT1_START : NIG_PORT0_START,
4098 func ? NIG_PORT1_END : NIG_PORT0_END);
4099 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + func*4, 1);
4100 /* Port MCP comes here */
4101 /* Port DMAE comes here */
4102
4103 switch (bp->board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
4104 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
4105 /* add SPIO 5 to group 0 */
4106 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4107 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4108 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
4109 break;
4110
4111 default:
4112 break;
4113 }
4114
c18487ee 4115 bnx2x__link_reset(bp);
a2fbb9ea 4116
c14423fe 4117 /* Reset PCIE errors for debug */
4118 REG_WR(bp, 0x2114, 0xffffffff);
4119 REG_WR(bp, 0x2120, 0xffffffff);
4120 REG_WR(bp, 0x2814, 0xffffffff);
4121
4122 /* !!! move to init_values.h */
4123 REG_WR(bp, XSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4124 REG_WR(bp, USDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4125 REG_WR(bp, CSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4126 REG_WR(bp, TSDM_REG_INIT_CREDIT_PXP_CTRL, 0x1);
4127
4128 REG_WR(bp, DBG_REG_PCI_REQ_CREDIT, 0x1);
4129 REG_WR(bp, TM_REG_PCIARB_CRDCNT_VAL, 0x1);
4130 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
4131 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x0);
4132
4133 bnx2x_gunzip_end(bp);
4134
4135 if (!nomcp) {
4136 port = bp->port;
4137
4138 bp->fw_drv_pulse_wr_seq =
f1410647 4139 (SHMEM_RD(bp, func_mb[port].drv_pulse_mb) &
a2fbb9ea 4140 DRV_PULSE_SEQ_MASK);
f1410647 4141 bp->fw_mb = SHMEM_RD(bp, func_mb[port].fw_mb_param);
4142 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x fw_mb 0x%x\n",
4143 bp->fw_drv_pulse_wr_seq, bp->fw_mb);
4144 } else {
4145 bp->fw_mb = 0;
4146 }
4147
4148 return 0;
4149}
4150
c14423fe 4151/* send the MCP a request, block until there is a reply */
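/* The handshake: the driver writes (command | seq) to its mailbox
 * header and the MCP echoes the same sequence number in the low bits
 * of its reply; a sequence mismatch means the firmware never picked
 * the command up.
 */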
4152static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
4153{
a2fbb9ea 4154 int port = bp->port;
4155 u32 seq = ++bp->fw_seq;
4156 u32 rc = 0;
a2fbb9ea 4157
4158 SHMEM_WR(bp, func_mb[port].drv_mb_header, (command | seq));
4159 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
4160
4161 /* let the FW do its magic ... */
4162 msleep(100); /* TBD */
4163
4164 if (CHIP_REV_IS_SLOW(bp))
4165 msleep(900);
4166
f1410647 4167 rc = SHMEM_RD(bp, func_mb[port].fw_mb_header);
4168 DP(BNX2X_MSG_MCP, "read (%x) seq is (%x) from FW MB\n", rc, seq);
4169
4170 /* is this a reply to our command? */
4171 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
4172 rc &= FW_MSG_CODE_MASK;
f1410647 4173
4174 } else {
4175 /* FW BUG! */
4176 BNX2X_ERR("FW failed to respond!\n");
4177 bnx2x_fw_dump(bp);
4178 rc = 0;
4179 }
f1410647 4180
4181 return rc;
4182}
4183
4184static void bnx2x_free_mem(struct bnx2x *bp)
4185{
4186
4187#define BNX2X_PCI_FREE(x, y, size) \
4188 do { \
4189 if (x) { \
4190 pci_free_consistent(bp->pdev, size, x, y); \
4191 x = NULL; \
4192 y = 0; \
4193 } \
4194 } while (0)
4195
4196#define BNX2X_FREE(x) \
4197 do { \
4198 if (x) { \
4199 vfree(x); \
4200 x = NULL; \
4201 } \
4202 } while (0)
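/* Both helpers use the do { } while (0) idiom so they expand safely as
 * single statements, and they clear the pointer (and DMA mapping) after
 * freeing to avoid double frees.
 */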
4203
4204 int i;
4205
4206 /* fastpath */
4207 for_each_queue(bp, i) {
4208
4209 /* Status blocks */
4210 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4211 bnx2x_fp(bp, i, status_blk_mapping),
4212 sizeof(struct host_status_block) +
4213 sizeof(struct eth_tx_db_data));
4214
4215 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
4216 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4217 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4218 bnx2x_fp(bp, i, tx_desc_mapping),
4219 sizeof(struct eth_tx_bd) * NUM_TX_BD);
4220
4221 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4222 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4223 bnx2x_fp(bp, i, rx_desc_mapping),
4224 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4225
4226 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4227 bnx2x_fp(bp, i, rx_comp_mapping),
4228 sizeof(struct eth_fast_path_rx_cqe) *
4229 NUM_RCQ_BD);
4230 }
4231
4232 BNX2X_FREE(bp->fp);
4233
4234 /* end of fastpath */
4235
4236 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4237 (sizeof(struct host_def_status_block)));
4238
4239 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4240 (sizeof(struct bnx2x_slowpath)));
4241
4242#ifdef BCM_ISCSI
4243 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4244 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4245 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4246 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4247#endif
4248 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, PAGE_SIZE);
4249
4250#undef BNX2X_PCI_FREE
4251#undef BNX2X_FREE
4252}
4253
4254static int bnx2x_alloc_mem(struct bnx2x *bp)
4255{
4256
4257#define BNX2X_PCI_ALLOC(x, y, size) \
4258 do { \
4259 x = pci_alloc_consistent(bp->pdev, size, y); \
4260 if (x == NULL) \
4261 goto alloc_mem_err; \
4262 memset(x, 0, size); \
4263 } while (0)
4264
4265#define BNX2X_ALLOC(x, size) \
4266 do { \
4267 x = vmalloc(size); \
4268 if (x == NULL) \
4269 goto alloc_mem_err; \
4270 memset(x, 0, size); \
4271 } while (0)
4272
4273 int i;
4274
4275 /* fastpath */
4276 BNX2X_ALLOC(bp->fp, sizeof(struct bnx2x_fastpath) * bp->num_queues);
4277
4278 for_each_queue(bp, i) {
4279 bnx2x_fp(bp, i, bp) = bp;
4280
4281 /* Status blocks */
4282 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4283 &bnx2x_fp(bp, i, status_blk_mapping),
4284 sizeof(struct host_status_block) +
4285 sizeof(struct eth_tx_db_data));
4286
4287 bnx2x_fp(bp, i, hw_tx_prods) =
4288 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
4289
4290 bnx2x_fp(bp, i, tx_prods_mapping) =
4291 bnx2x_fp(bp, i, status_blk_mapping) +
4292 sizeof(struct host_status_block);
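/* The Tx producers block shares the status block DMA allocation:
 * struct eth_tx_db_data sits immediately after struct
 * host_status_block, so its mapping is the status block mapping
 * plus that offset.
 */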
4293
4294 /* fast path rings: tx_buf tx_desc rx_buf rx_desc rx_comp */
4295 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4296 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4297 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4298 &bnx2x_fp(bp, i, tx_desc_mapping),
4299 sizeof(struct eth_tx_bd) * NUM_TX_BD);
4300
4301 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4302 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4303 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4304 &bnx2x_fp(bp, i, rx_desc_mapping),
4305 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4306
4307 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4308 &bnx2x_fp(bp, i, rx_comp_mapping),
4309 sizeof(struct eth_fast_path_rx_cqe) *
4310 NUM_RCQ_BD);
4311
4312 }
4313 /* end of fastpath */
4314
4315 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4316 sizeof(struct host_def_status_block));
4317
4318 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4319 sizeof(struct bnx2x_slowpath));
4320
4321#ifdef BCM_ISCSI
4322 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4323
4324 /* Initialize T1 */
4325 for (i = 0; i < 64*1024; i += 64) {
4326 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
4327 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
4328 }
4329
4330 /* allocate searcher T2 table
4331 we allocate 1/4 of alloc num for T2
4332 (which is not entered into the ILT) */
4333 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4334
4335 /* Initialize T2 */
4336 for (i = 0; i < 16*1024; i += 64)
4337 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4338
c14423fe 4339 /* now fixup the last line in the block to point to the next block */
4340 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
4341
4342 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
4343 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4344
4345 /* QM queues (128*MAX_CONN) */
4346 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4347#endif
4348
4349 /* Slow path ring */
4350 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4351
4352 return 0;
4353
4354alloc_mem_err:
4355 bnx2x_free_mem(bp);
4356 return -ENOMEM;
4357
4358#undef BNX2X_PCI_ALLOC
4359#undef BNX2X_ALLOC
4360}
4361
4362static void bnx2x_free_tx_skbs(struct bnx2x *bp)
4363{
4364 int i;
4365
4366 for_each_queue(bp, i) {
4367 struct bnx2x_fastpath *fp = &bp->fp[i];
4368
4369 u16 bd_cons = fp->tx_bd_cons;
4370 u16 sw_prod = fp->tx_pkt_prod;
4371 u16 sw_cons = fp->tx_pkt_cons;
4372
4373 BUG_TRAP(fp->tx_buf_ring != NULL);
4374
4375 while (sw_cons != sw_prod) {
4376 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
4377 sw_cons++;
4378 }
4379 }
4380}
4381
4382static void bnx2x_free_rx_skbs(struct bnx2x *bp)
4383{
4384 int i, j;
4385
4386 for_each_queue(bp, j) {
4387 struct bnx2x_fastpath *fp = &bp->fp[j];
4388
4389 BUG_TRAP(fp->rx_buf_ring != NULL);
4390
4391 for (i = 0; i < NUM_RX_BD; i++) {
4392 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
4393 struct sk_buff *skb = rx_buf->skb;
4394
4395 if (skb == NULL)
4396 continue;
4397
4398 pci_unmap_single(bp->pdev,
4399 pci_unmap_addr(rx_buf, mapping),
4400 bp->rx_buf_use_size,
4401 PCI_DMA_FROMDEVICE);
4402
4403 rx_buf->skb = NULL;
4404 dev_kfree_skb(skb);
4405 }
4406 }
4407}
4408
4409static void bnx2x_free_skbs(struct bnx2x *bp)
4410{
4411 bnx2x_free_tx_skbs(bp);
4412 bnx2x_free_rx_skbs(bp);
4413}
4414
4415static void bnx2x_free_msix_irqs(struct bnx2x *bp)
4416{
4417 int i;
4418
4419 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 4420 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
4421 bp->msix_table[0].vector);
4422
4423 for_each_queue(bp, i) {
c14423fe 4424 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
4425 "state(%x)\n", i, bp->msix_table[i + 1].vector,
4426 bnx2x_fp(bp, i, state));
4427
4428 if (bnx2x_fp(bp, i, state) != BNX2X_FP_STATE_CLOSED)
4429 BNX2X_ERR("IRQ of fp #%d being freed while "
4430 "state != closed\n", i);
a2fbb9ea 4431
228241eb 4432 free_irq(bp->msix_table[i + 1].vector, &bp->fp[i]);
4433 }
4434
4435}
4436
4437static void bnx2x_free_irq(struct bnx2x *bp)
4438{
4439
4440 if (bp->flags & USING_MSIX_FLAG) {
4441
4442 bnx2x_free_msix_irqs(bp);
4443 pci_disable_msix(bp->pdev);
4444
4445 bp->flags &= ~USING_MSIX_FLAG;
4446
4447 } else
4448 free_irq(bp->pdev->irq, bp->dev);
4449}
4450
4451static int bnx2x_enable_msix(struct bnx2x *bp)
4452{
4453
4454 int i;
4455
4456 bp->msix_table[0].entry = 0;
4457 for_each_queue(bp, i)
4458 bp->msix_table[i + 1].entry = i + 1;
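/* entry 0 carries the slow-path (default status block) interrupt;
 * fastpath queue i uses table entry i + 1, matching the request_irq()
 * calls in bnx2x_req_msix_irqs() */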
4459
4460 if (pci_enable_msix(bp->pdev, &bp->msix_table[0],
4461 bp->num_queues + 1)) {
228241eb 4462 BNX2X_LOG("failed to enable MSI-X\n");
4463 return -1;
4464
4465 }
4466
4467 bp->flags |= USING_MSIX_FLAG;
4468
4469 return 0;
4470
4471}
4472
4473
4474static int bnx2x_req_msix_irqs(struct bnx2x *bp)
4475{
4476
4477 int i, rc;
4478
4479 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
4480 bp->dev->name, bp->dev);
4481
4482 if (rc) {
4483 BNX2X_ERR("request sp irq failed\n");
4484 return -EBUSY;
4485 }
4486
4487 for_each_queue(bp, i) {
4488 rc = request_irq(bp->msix_table[i + 1].vector,
4489 bnx2x_msix_fp_int, 0,
4490 bp->dev->name, &bp->fp[i]);
4491
4492 if (rc) {
4493 BNX2X_ERR("request fp #%d irq failed "
4494 "rc %d\n", i, rc);
4495 bnx2x_free_msix_irqs(bp);
4496 return -EBUSY;
4497 }
4498
4499 bnx2x_fp(bp, i, state) = BNX2X_FP_STATE_IRQ;
4500
4501 }
4502
4503 return 0;
4504
4505}
4506
4507static int bnx2x_req_irq(struct bnx2x *bp)
4508{
4509
4510 int rc = request_irq(bp->pdev->irq, bnx2x_interrupt,
4511 IRQF_SHARED, bp->dev->name, bp->dev);
4512 if (!rc)
4513 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
4514
4515 return rc;
4516
4517}
4518
4519/*
4520 * Init service functions
4521 */
4522
4523static void bnx2x_set_mac_addr(struct bnx2x *bp)
4524{
4525 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4526
4527 /* CAM allocation
4528 * unicasts 0-31:port0 32-63:port1
4529 * multicast 64-127:port0 128-191:port1
4530 */
4531 config->hdr.length_6b = 2;
4532 config->hdr.offset = bp->port ? 32 : 0;
4533 config->hdr.reserved0 = 0;
4534 config->hdr.reserved1 = 0;
4535
4536 /* primary MAC */
4537 config->config_table[0].cam_entry.msb_mac_addr =
4538 swab16(*(u16 *)&bp->dev->dev_addr[0]);
4539 config->config_table[0].cam_entry.middle_mac_addr =
4540 swab16(*(u16 *)&bp->dev->dev_addr[2]);
4541 config->config_table[0].cam_entry.lsb_mac_addr =
4542 swab16(*(u16 *)&bp->dev->dev_addr[4]);
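/* e.g. on a little-endian host, dev_addr 00:11:22:33:44:55 yields
 * msb 0x0011, middle 0x2233 and lsb 0x4455 */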
4543 config->config_table[0].cam_entry.flags = cpu_to_le16(bp->port);
4544 config->config_table[0].target_table_entry.flags = 0;
4545 config->config_table[0].target_table_entry.client_id = 0;
4546 config->config_table[0].target_table_entry.vlan_id = 0;
4547
4548 DP(NETIF_MSG_IFUP, "setting MAC (%04x:%04x:%04x)\n",
4549 config->config_table[0].cam_entry.msb_mac_addr,
4550 config->config_table[0].cam_entry.middle_mac_addr,
4551 config->config_table[0].cam_entry.lsb_mac_addr);
4552
4553 /* broadcast */
4554 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
4555 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
4556 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
4557 config->config_table[1].cam_entry.flags = cpu_to_le16(bp->port);
4558 config->config_table[1].target_table_entry.flags =
4559 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4560 config->config_table[1].target_table_entry.client_id = 0;
4561 config->config_table[1].target_table_entry.vlan_id = 0;
4562
4563 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4564 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4565 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4566}
4567
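/* Poll for a ramrod completion: *state_p is updated by the slow-path
 * event handler, so it is re-checked (after a memory barrier) every
 * millisecond for up to about half a second.
 */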
4568static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4569 int *state_p, int poll)
4570{
4571 /* can take a while if any port is running */
4572 int timeout = 500;
4573
4574 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4575 poll ? "polling" : "waiting", state, idx);
4576
4577 might_sleep();
4578
4579 while (timeout) {
4580
4581 if (poll) {
4582 bnx2x_rx_int(bp->fp, 10);
4583 /* If the index is non-zero,
4584 * the reply for some commands will
4585 * arrive on a non-default queue
4586 */
4587 if (idx)
4588 bnx2x_rx_int(&bp->fp[idx], 10);
4589 }
4590
4591 mb(); /* state is changed by bnx2x_sp_event()*/
4592
49d66772 4593 if (*state_p == state)
4594 return 0;
4595
4596 timeout--;
4597 msleep(1);
4598
4599 }
4600
a2fbb9ea 4601 /* timeout! */
4602 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4603 poll ? "polling" : "waiting", state, idx);
a2fbb9ea 4604
49d66772 4605 return -EBUSY;
4606}
4607
4608static int bnx2x_setup_leading(struct bnx2x *bp)
4609{
4610
c14423fe 4611 /* reset IGU state */
4612 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4613
4614 /* SETUP ramrod */
4615 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4616
4617 return bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4618
4619}
4620
4621static int bnx2x_setup_multi(struct bnx2x *bp, int index)
4622{
4623
4624 /* reset IGU state */
4625 bnx2x_ack_sb(bp, index, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4626
228241eb 4627 /* SETUP ramrod */
4628 bp->fp[index].state = BNX2X_FP_STATE_OPENING;
4629 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0, index, 0);
4630
4631 /* Wait for completion */
4632 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
228241eb 4633 &(bp->fp[index].state), 0);
4634
4635}
4636
4637
4638static int bnx2x_poll(struct napi_struct *napi, int budget);
4639static void bnx2x_set_rx_mode(struct net_device *dev);
4640
4641static int bnx2x_nic_load(struct bnx2x *bp, int req_irq)
4642{
4643 u32 load_code;
4644 int i;
4645
4646 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
4647
4648 /* Send LOAD_REQUEST command to MCP.
4649 Returns the type of LOAD command: if this is the
4650 first port to be initialized, the common blocks should
4651 also be initialized; otherwise they should not.
4652 */
4653 if (!nomcp) {
4654 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
4655 if (!load_code) {
4656 BNX2X_ERR("MCP response failure, unloading\n");
4657 return -EBUSY;
4658 }
4659 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
4660 BNX2X_ERR("MCP refused load request, unloading\n");
4661 return -EBUSY; /* other port in diagnostic mode */
4662 }
4663 } else {
228241eb 4664 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
4665 }
4666
4667 /* if we can't use MSI-X we only need one fastpath,
4668 * so try to enable MSI-X with the requested number of fastpath
4669 * queues and fall back to INT#A with a single one
4670 */
4671 if (req_irq) {
4672 if (use_inta) {
4673 bp->num_queues = 1;
4674 } else {
c14423fe 4675 if ((use_multi > 1) && (use_multi <= 16))
4676 /* user requested number */
4677 bp->num_queues = use_multi;
4678 else if (use_multi == 1)
4679 bp->num_queues = num_online_cpus();
4680 else
4681 bp->num_queues = 1;
4682
4683 if (bnx2x_enable_msix(bp)) {
c14423fe 4684 /* failed to enable msix */
4685 bp->num_queues = 1;
4686 if (use_multi)
c14423fe 4687 BNX2X_ERR("Multi requested but failed"
a2fbb9ea
ET
4688 " to enable MSI-X\n");
4689 }
4690 }
4691 }
4692
4693 DP(NETIF_MSG_IFUP, "set number of queues to %d\n", bp->num_queues);
4694
4695 if (bnx2x_alloc_mem(bp))
4696 return -ENOMEM;
4697
4698 if (req_irq) {
4699 if (bp->flags & USING_MSIX_FLAG) {
4700 if (bnx2x_req_msix_irqs(bp)) {
4701 pci_disable_msix(bp->pdev);
228241eb 4702 goto load_error;
4703 }
4704
4705 } else {
4706 if (bnx2x_req_irq(bp)) {
4707 BNX2X_ERR("IRQ request failed, aborting\n");
228241eb 4708 goto load_error;
4709 }
4710 }
4711 }
4712
4713 for_each_queue(bp, i)
4714 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
4715 bnx2x_poll, 128);
4716
4717
4718 /* Initialize HW */
4719 if (bnx2x_function_init(bp,
4720 (load_code == FW_MSG_CODE_DRV_LOAD_COMMON))) {
a2fbb9ea 4721 BNX2X_ERR("HW init failed, aborting\n");
228241eb 4722 goto load_error;
4723 }
4724
4725
4726 atomic_set(&bp->intr_sem, 0);
4727
4728
4729 /* Setup NIC internals and enable interrupts */
4730 bnx2x_nic_init(bp);
4731
4732 /* Send LOAD_DONE command to MCP */
4733 if (!nomcp) {
4734 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
4735 if (!load_code) {
a2fbb9ea 4736 BNX2X_ERR("MCP response failure, unloading\n");
228241eb 4737 goto load_int_disable;
4738 }
4739 }
4740
4741 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
4742
4743 /* Enable Rx interrupt handling before sending the ramrod
4744 as it's completed on Rx FP queue */
4745 for_each_queue(bp, i)
4746 napi_enable(&bnx2x_fp(bp, i, napi));
4747
4748 if (bnx2x_setup_leading(bp))
228241eb 4749 goto load_stop_netif;
4750
4751 for_each_nondefault_queue(bp, i)
4752 if (bnx2x_setup_multi(bp, i))
228241eb 4753 goto load_stop_netif;
4754
4755 bnx2x_set_mac_addr(bp);
4756
c18487ee 4757 bnx2x_initial_phy_init(bp);
4758
4759 /* Start fast path */
4760 if (req_irq) { /* IRQ is only requested from bnx2x_open */
4761 netif_start_queue(bp->dev);
4762 if (bp->flags & USING_MSIX_FLAG)
4763 printk(KERN_INFO PFX "%s: using MSI-X\n",
4764 bp->dev->name);
4765
4766 /* Otherwise only the Tx queue should be re-enabled */
4767 } else if (netif_running(bp->dev)) {
4768 netif_wake_queue(bp->dev);
4769 bnx2x_set_rx_mode(bp->dev);
4770 }
4771
4772 /* start the timer */
4773 mod_timer(&bp->timer, jiffies + bp->current_interval);
4774
4775 return 0;
4776
228241eb 4777load_stop_netif:
4778 for_each_queue(bp, i)
4779 napi_disable(&bnx2x_fp(bp, i, napi));
4780
228241eb 4781load_int_disable:
615f8fd9 4782 bnx2x_int_disable_sync(bp);
4783
4784 bnx2x_free_skbs(bp);
4785 bnx2x_free_irq(bp);
4786
228241eb 4787load_error:
4788 bnx2x_free_mem(bp);
4789
4790 /* TBD we really need to reset the chip
4791 if we want to recover from this */
228241eb 4792 return -EBUSY;
4793}
4794
4795
4796static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
4797{
4798 int port = bp->port;
4799#ifdef USE_DMAE
4800 u32 wb_write[2];
4801#endif
4802 int base, i;
4803
4804 DP(NETIF_MSG_IFDOWN, "reset called with code %x\n", reset_code);
4805
4806 /* Do not rcv packets to BRB */
4807 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
4808 /* Do not direct rcv packets that are not for MCP to the BRB */
4809 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
4810 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
4811
4812 /* Configure IGU and AEU */
4813 REG_WR(bp, HC_REG_CONFIG_0 + port*4, 0x1000);
4814 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
4815
4816 /* TODO: Close Doorbell port? */
4817
4818 /* Clear ILT */
4819#ifdef USE_DMAE
4820 wb_write[0] = 0;
4821 wb_write[1] = 0;
4822#endif
4823 base = port * RQ_ONCHIP_AT_PORT_SIZE;
4824 for (i = base; i < base + RQ_ONCHIP_AT_PORT_SIZE; i++) {
4825#ifdef USE_DMAE
4826 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
4827#else
4828 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, 0);
4829 REG_WR_IND(bp, PXP2_REG_RQ_ONCHIP_AT + i*8 + 4, 0);
4830#endif
4831 }
4832
4833 if (reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) {
4834 /* reset_common */
4835 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4836 0xd3ffff7f);
4837 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
4838 0x1403);
4839 }
4840}
4841
4842static int bnx2x_stop_multi(struct bnx2x *bp, int index)
4843{
4844
4845 int rc;
4846
c14423fe 4847 /* halt the connection */
4848 bp->fp[index].state = BNX2X_FP_STATE_HALTING;
4849 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, 0, 0);
4850
4851
4852 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
4853 &(bp->fp[index].state), 1);
c14423fe 4854 if (rc) /* timeout */
4855 return rc;
4856
4857 /* delete cfc entry */
4858 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
4859
49d66772 4860 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
4861 &(bp->fp[index].state), 1);
4862
4863}
4864
4865
4866static void bnx2x_stop_leading(struct bnx2x *bp)
4867{
49d66772 4868 u16 dsb_sp_prod_idx;
c14423fe 4869 /* if the other port is handling traffic,
4870 this can take a lot of time */
4871 int timeout = 500;
4872
4873 might_sleep();
4874
4875 /* Send HALT ramrod */
4876 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
4877 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, 0, 0);
4878
4879 if (bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
4880 &(bp->fp[0].state), 1))
4881 return;
4882
49d66772 4883 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 4884
228241eb 4885 /* Send PORT_DELETE ramrod */
4886 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
4887
49d66772 4888 /* Wait for completion to arrive on default status block
4889 we are going to reset the chip anyway
4890 so there is not much to do if this times out
4891 */
4892 while ((dsb_sp_prod_idx == *bp->dsb_sp_prod) && timeout) {
4893 timeout--;
4894 msleep(1);
a2fbb9ea 4895 }
4896 if (!timeout) {
4897 DP(NETIF_MSG_IFDOWN, "timeout polling for completion "
4898 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
4899 *bp->dsb_sp_prod, dsb_sp_prod_idx);
4900 }
4901 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
4902 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
4903}
4904
49d66772 4905
228241eb 4906static int bnx2x_nic_unload(struct bnx2x *bp, int free_irq)
4907{
4908 u32 reset_code = 0;
228241eb 4909 int i, timeout;
4910
4911 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
4912
228241eb 4913 del_timer_sync(&bp->timer);
a2fbb9ea 4914
4915 bp->rx_mode = BNX2X_RX_MODE_NONE;
4916 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 4917
4918 if (netif_running(bp->dev)) {
4919 netif_tx_disable(bp->dev);
4920 bp->dev->trans_start = jiffies; /* prevent tx timeout */
4921 }
4922
4923 /* Wait until all fast path tasks complete */
4924 for_each_queue(bp, i) {
4925 struct bnx2x_fastpath *fp = &bp->fp[i];
4926
4927 timeout = 1000;
4928 while (bnx2x_has_work(fp) && (timeout--))
4929 msleep(1);
4930 if (!timeout)
4931 BNX2X_ERR("timeout waiting for queue[%d]\n", i);
4932 }
4933
4934 /* Wait until stat ramrod returns and all SP tasks complete */
4935 timeout = 1000;
4936 while ((bp->stat_pending || (bp->spq_left != MAX_SPQ_PENDING)) &&
4937 (timeout--))
a2fbb9ea
ET
4938 msleep(1);
4939
4940 for_each_queue(bp, i)
4941 napi_disable(&bnx2x_fp(bp, i, napi));
4942 /* Disable interrupts after Tx and Rx are disabled on stack level */
4943 bnx2x_int_disable_sync(bp);
4944
4945 if (bp->flags & NO_WOL_FLAG)
4946 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
228241eb 4947
4948 else if (bp->wol) {
4949 u32 emac_base = bp->port ? GRCBASE_EMAC0 : GRCBASE_EMAC1;
4950 u8 *mac_addr = bp->dev->dev_addr;
4951 u32 val = (EMAC_MODE_MPKT | EMAC_MODE_MPKT_RCVD |
4952 EMAC_MODE_ACPI_RCVD);
4953
4954 EMAC_WR(EMAC_REG_EMAC_MODE, val);
4955
4956 val = (mac_addr[0] << 8) | mac_addr[1];
4957 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH, val);
4958
4959 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
4960 (mac_addr[4] << 8) | mac_addr[5];
4961 EMAC_WR(EMAC_REG_EMAC_MAC_MATCH + 4, val);
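/* e.g. for MAC 00:11:22:33:44:55 this programs MAC_MATCH with
 * 0x00000011 and MAC_MATCH + 4 with 0x22334455 */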
4962
4963 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
228241eb 4964
4965 } else
4966 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
4967
228241eb 4968 /* Close multi and leading connections */
4969 for_each_nondefault_queue(bp, i)
4970 if (bnx2x_stop_multi(bp, i))
228241eb 4971 goto unload_error;
4972
4973 bnx2x_stop_leading(bp);
4974 if ((bp->state != BNX2X_STATE_CLOSING_WAIT4_UNLOAD) ||
4975 (bp->fp[0].state != BNX2X_FP_STATE_CLOSED)) {
4976 DP(NETIF_MSG_IFDOWN, "failed to close leading properly! "
4977 "state 0x%x fp[0].state 0x%x\n",
4978 bp->state, bp->fp[0].state);
4979 }
4980
4981unload_error:
c18487ee 4982 bnx2x__link_reset(bp);
a2fbb9ea 4983
a2fbb9ea 4984 if (!nomcp)
228241eb 4985 reset_code = bnx2x_fw_command(bp, reset_code);
a2fbb9ea 4986 else
228241eb 4987 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
4988
4989 /* Release IRQs */
228241eb 4990 if (free_irq)
4991 bnx2x_free_irq(bp);
4992
4993 /* Reset the chip */
228241eb 4994 bnx2x_reset_chip(bp, reset_code);
4995
4996 /* Report UNLOAD_DONE to MCP */
4997 if (!nomcp)
4998 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
4999
5000 /* Free SKBs and driver internals */
5001 bnx2x_free_skbs(bp);
5002 bnx2x_free_mem(bp);
5003
5004 bp->state = BNX2X_STATE_CLOSED;
228241eb 5005
5006 netif_carrier_off(bp->dev);
5007
5008 return 0;
5009}
5010
5011/* end of nic load/unload */
5012
5013/* ethtool_ops */
5014
5015/*
5016 * Init service functions
5017 */
5018
5019static void bnx2x_link_settings_supported(struct bnx2x *bp, u32 switch_cfg)
5020{
5021 int port = bp->port;
5022 u32 ext_phy_type;
5023
5024 switch (switch_cfg) {
5025 case SWITCH_CFG_1G:
5026 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
5027
5028 ext_phy_type =
5029 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5030 switch (ext_phy_type) {
5031 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
5032 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
5033 ext_phy_type);
5034
5035 bp->supported |= (SUPPORTED_10baseT_Half |
5036 SUPPORTED_10baseT_Full |
5037 SUPPORTED_100baseT_Half |
5038 SUPPORTED_100baseT_Full |
5039 SUPPORTED_1000baseT_Full |
f1410647 5040 SUPPORTED_2500baseX_Full |
5041 SUPPORTED_TP | SUPPORTED_FIBRE |
5042 SUPPORTED_Autoneg |
5043 SUPPORTED_Pause |
5044 SUPPORTED_Asym_Pause);
5045 break;
5046
5047 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
5048 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
5049 ext_phy_type);
5050
5051 bp->supported |= (SUPPORTED_10baseT_Half |
5052 SUPPORTED_10baseT_Full |
5053 SUPPORTED_100baseT_Half |
5054 SUPPORTED_100baseT_Full |
5055 SUPPORTED_1000baseT_Full |
5056 SUPPORTED_TP | SUPPORTED_FIBRE |
5057 SUPPORTED_Autoneg |
5058 SUPPORTED_Pause |
5059 SUPPORTED_Asym_Pause);
5060 break;
5061
5062 default:
5063 BNX2X_ERR("NVRAM config error. "
5064 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 5065 bp->link_params.ext_phy_config);
5066 return;
5067 }
5068
5069 bp->phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
5070 port*0x10);
5071 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
5072 break;
5073
5074 case SWITCH_CFG_10G:
5075 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
5076
5077 ext_phy_type =
5078 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5079 switch (ext_phy_type) {
5080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5081 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
5082 ext_phy_type);
5083
5084 bp->supported |= (SUPPORTED_10baseT_Half |
5085 SUPPORTED_10baseT_Full |
5086 SUPPORTED_100baseT_Half |
5087 SUPPORTED_100baseT_Full |
5088 SUPPORTED_1000baseT_Full |
f1410647 5089 SUPPORTED_2500baseX_Full |
5090 SUPPORTED_10000baseT_Full |
5091 SUPPORTED_TP | SUPPORTED_FIBRE |
5092 SUPPORTED_Autoneg |
5093 SUPPORTED_Pause |
5094 SUPPORTED_Asym_Pause);
5095 break;
5096
5097 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5098 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
5099 ext_phy_type);
5100
5101 bp->supported |= (SUPPORTED_10000baseT_Full |
5102 SUPPORTED_FIBRE |
5103 SUPPORTED_Pause |
5104 SUPPORTED_Asym_Pause);
5105 break;
5106
a2fbb9ea 5107 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5108 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
5109 ext_phy_type);
5110
5111 bp->supported |= (SUPPORTED_10000baseT_Full |
5112 SUPPORTED_1000baseT_Full |
5113 SUPPORTED_Autoneg |
5114 SUPPORTED_FIBRE |
5115 SUPPORTED_Pause |
5116 SUPPORTED_Asym_Pause);
5117 break;
5118
5119 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5120 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
5121 ext_phy_type);
5122
5123 bp->supported |= (SUPPORTED_10000baseT_Full |
f1410647 5124 SUPPORTED_1000baseT_Full |
a2fbb9ea 5125 SUPPORTED_FIBRE |
5126 SUPPORTED_Autoneg |
5127 SUPPORTED_Pause |
5128 SUPPORTED_Asym_Pause);
5129 break;
5130
5131 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5132 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
5133 ext_phy_type);
5134
5135 bp->supported |= (SUPPORTED_10000baseT_Full |
5136 SUPPORTED_2500baseX_Full |
5137 SUPPORTED_1000baseT_Full |
5138 SUPPORTED_FIBRE |
5139 SUPPORTED_Autoneg |
5140 SUPPORTED_Pause |
5141 SUPPORTED_Asym_Pause);
5142 break;
5143
5144 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5145 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
5146 ext_phy_type);
5147
5148 bp->supported |= (SUPPORTED_10000baseT_Full |
5149 SUPPORTED_TP |
5150 SUPPORTED_Autoneg |
5151 SUPPORTED_Pause |
5152 SUPPORTED_Asym_Pause);
5153 break;
5154
5155 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5156 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
5157 bp->link_params.ext_phy_config);
5158 break;
5159
5160 default:
5161 BNX2X_ERR("NVRAM config error. "
5162 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 5163 bp->link_params.ext_phy_config);
5164 return;
5165 }
5166
5167 bp->phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
5168 port*0x18);
5169 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->phy_addr);
5170
5171 break;
5172
5173 default:
5174 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
5175 bp->link_config);
5176 return;
5177 }
c18487ee 5178 bp->link_params.phy_addr = bp->phy_addr;
5179
5180 /* mask what we support according to speed_cap_mask */
5181 if (!(bp->link_params.speed_cap_mask &
5182 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
5183 bp->supported &= ~SUPPORTED_10baseT_Half;
5184
5185 if (!(bp->link_params.speed_cap_mask &
5186 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
5187 bp->supported &= ~SUPPORTED_10baseT_Full;
5188
5189 if (!(bp->link_params.speed_cap_mask &
5190 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
5191 bp->supported &= ~SUPPORTED_100baseT_Half;
5192
5193 if (!(bp->link_params.speed_cap_mask &
5194 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
5195 bp->supported &= ~SUPPORTED_100baseT_Full;
5196
5197 if (!(bp->link_params.speed_cap_mask &
5198 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
5199 bp->supported &= ~(SUPPORTED_1000baseT_Half |
5200 SUPPORTED_1000baseT_Full);
5201
5202 if (!(bp->link_params.speed_cap_mask &
5203 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
f1410647 5204 bp->supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 5205
5206 if (!(bp->link_params.speed_cap_mask &
5207 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
5208 bp->supported &= ~SUPPORTED_10000baseT_Full;
5209
5210 BNX2X_DEV_INFO("supported 0x%x\n", bp->supported);
5211}
5212
5213static void bnx2x_link_settings_requested(struct bnx2x *bp)
5214{
c18487ee 5215 bp->link_params.req_duplex = DUPLEX_FULL;
5216
5217 switch (bp->link_config & PORT_FEATURE_LINK_SPEED_MASK) {
5218 case PORT_FEATURE_LINK_SPEED_AUTO:
5219 if (bp->supported & SUPPORTED_Autoneg) {
c18487ee 5220 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5221 bp->advertising = bp->supported;
5222 } else {
5223 u32 ext_phy_type =
5224 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
5225
5226 if ((ext_phy_type ==
5227 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5228 (ext_phy_type ==
5229 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 5230 /* force 10G, no AN */
c18487ee 5231 bp->link_params.req_line_speed = SPEED_10000;
5232 bp->advertising =
5233 (ADVERTISED_10000baseT_Full |
5234 ADVERTISED_FIBRE);
5235 break;
5236 }
5237 BNX2X_ERR("NVRAM config error. "
5238 "Invalid link_config 0x%x"
5239 " Autoneg not supported\n",
5240 bp->link_config);
5241 return;
5242 }
5243 break;
5244
5245 case PORT_FEATURE_LINK_SPEED_10M_FULL:
f1410647 5246 if (bp->supported & SUPPORTED_10baseT_Full) {
c18487ee 5247 bp->link_params.req_line_speed = SPEED_10;
5248 bp->advertising = (ADVERTISED_10baseT_Full |
5249 ADVERTISED_TP);
5250 } else {
5251 BNX2X_ERR("NVRAM config error. "
5252 "Invalid link_config 0x%x"
5253 " speed_cap_mask 0x%x\n",
5254 bp->link_config,
5255 bp->link_params.speed_cap_mask);
5256 return;
5257 }
5258 break;
5259
5260 case PORT_FEATURE_LINK_SPEED_10M_HALF:
f1410647 5261 if (bp->supported & SUPPORTED_10baseT_Half) {
5262 bp->link_params.req_line_speed = SPEED_10;
5263 bp->link_params.req_duplex = DUPLEX_HALF;
5264 bp->advertising = (ADVERTISED_10baseT_Half |
5265 ADVERTISED_TP);
5266 } else {
5267 BNX2X_ERR("NVRAM config error. "
5268 "Invalid link_config 0x%x"
5269 " speed_cap_mask 0x%x\n",
5270 bp->link_config,
5271 bp->link_params.speed_cap_mask);
5272 return;
5273 }
5274 break;
5275
5276 case PORT_FEATURE_LINK_SPEED_100M_FULL:
f1410647 5277 if (bp->supported & SUPPORTED_100baseT_Full) {
c18487ee 5278 bp->link_params.req_line_speed = SPEED_100;
5279 bp->advertising = (ADVERTISED_100baseT_Full |
5280 ADVERTISED_TP);
5281 } else {
5282 BNX2X_ERR("NVRAM config error. "
5283 "Invalid link_config 0x%x"
5284 " speed_cap_mask 0x%x\n",
5285 bp->link_config,
5286 bp->link_params.speed_cap_mask);
5287 return;
5288 }
5289 break;
5290
5291 case PORT_FEATURE_LINK_SPEED_100M_HALF:
f1410647 5292 if (bp->supported & SUPPORTED_100baseT_Half) {
5293 bp->link_params.req_line_speed = SPEED_100;
5294 bp->link_params.req_duplex = DUPLEX_HALF;
5295 bp->advertising = (ADVERTISED_100baseT_Half |
5296 ADVERTISED_TP);
5297 } else {
5298 BNX2X_ERR("NVRAM config error. "
5299 "Invalid link_config 0x%x"
5300 " speed_cap_mask 0x%x\n",
5301 bp->link_config,
5302 bp->link_params.speed_cap_mask);
5303 return;
5304 }
5305 break;
5306
5307 case PORT_FEATURE_LINK_SPEED_1G:
f1410647 5308 if (bp->supported & SUPPORTED_1000baseT_Full) {
c18487ee 5309 bp->link_params.req_line_speed = SPEED_1000;
5310 bp->advertising = (ADVERTISED_1000baseT_Full |
5311 ADVERTISED_TP);
5312 } else {
5313 BNX2X_ERR("NVRAM config error. "
5314 "Invalid link_config 0x%x"
5315 " speed_cap_mask 0x%x\n",
5316 bp->link_config,
5317 bp->link_params.speed_cap_mask);
5318 return;
5319 }
5320 break;
5321
5322 case PORT_FEATURE_LINK_SPEED_2_5G:
f1410647 5323 if (bp->supported & SUPPORTED_2500baseX_Full) {
c18487ee 5324 bp->link_params.req_line_speed = SPEED_2500;
f1410647 5325 bp->advertising = (ADVERTISED_2500baseX_Full |
5326 ADVERTISED_TP);
5327 } else {
5328 BNX2X_ERR("NVRAM config error. "
5329 "Invalid link_config 0x%x"
5330 " speed_cap_mask 0x%x\n",
5331 bp->link_config,
5332 bp->link_params.speed_cap_mask);
5333 return;
5334 }
5335 break;
5336
5337 case PORT_FEATURE_LINK_SPEED_10G_CX4:
5338 case PORT_FEATURE_LINK_SPEED_10G_KX4:
5339 case PORT_FEATURE_LINK_SPEED_10G_KR:
f1410647 5340 if (bp->supported & SUPPORTED_10000baseT_Full) {
c18487ee 5341 bp->link_params.req_line_speed = SPEED_10000;
5342 bp->advertising = (ADVERTISED_10000baseT_Full |
5343 ADVERTISED_FIBRE);
5344 } else {
5345 BNX2X_ERR("NVRAM config error. "
5346 "Invalid link_config 0x%x"
5347 " speed_cap_mask 0x%x\n",
5348 bp->link_config,
5349 bp->link_params.speed_cap_mask);
5350 return;
5351 }
5352 break;
5353
5354 default:
5355 BNX2X_ERR("NVRAM config error. "
5356 "BAD link speed link_config 0x%x\n",
5357 bp->link_config);
c18487ee 5358 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5359 bp->advertising = bp->supported;
5360 break;
5361 }
a2fbb9ea 5362
c18487ee 5363 bp->link_params.req_flow_ctrl = (bp->link_config &
a2fbb9ea 5364 PORT_FEATURE_FLOW_CONTROL_MASK);
5365 if ((bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
5366 !(bp->supported & SUPPORTED_Autoneg))
5367 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 5368
c18487ee 5369 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 5370 " advertising 0x%x\n",
5371 bp->link_params.req_line_speed,
5372 bp->link_params.req_duplex,
5373 bp->link_params.req_flow_ctrl, bp->advertising);
5374}
5375
5376static void bnx2x_get_hwinfo(struct bnx2x *bp)
5377{
5378 u32 val, val2, val3, val4, id;
5379 int port = bp->port;
5380
5381 bp->shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5382 BNX2X_DEV_INFO("shmem offset is %x\n", bp->shmem_base);
5383
5384 /* Get the chip revision id and number. */
5385 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
5386 val = REG_RD(bp, MISC_REG_CHIP_NUM);
5387 id = ((val & 0xffff) << 16);
5388 val = REG_RD(bp, MISC_REG_CHIP_REV);
5389 id |= ((val & 0xf) << 12);
5390 val = REG_RD(bp, MISC_REG_CHIP_METAL);
5391 id |= ((val & 0xff) << 4);
5392 val = REG_RD(bp, MISC_REG_BOND_ID);
5393 id |= (val & 0xf);
5394 bp->chip_id = id;
5395 BNX2X_DEV_INFO("chip ID is %x\n", id);
5396
5397 bp->link_params.bp = bp;
5398
5399 if (!bp->shmem_base || (bp->shmem_base != 0xAF900)) {
5400 BNX2X_DEV_INFO("MCP not active\n");
5401 nomcp = 1;
5402 goto set_mac;
5403 }
5404
5405 val = SHMEM_RD(bp, validity_map[port]);
5406 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
f1410647
ET
5407 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5408 BNX2X_ERR("BAD MCP validity signature\n");
a2fbb9ea 5409
f1410647 5410 bp->fw_seq = (SHMEM_RD(bp, func_mb[port].drv_mb_header) &
a2fbb9ea
ET
5411 DRV_MSG_SEQ_NUMBER_MASK);
5412
5413 bp->hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
f1410647 5414 bp->board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
c18487ee 5415 bp->link_params.serdes_config =
f1410647 5416 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 5417 bp->link_params.lane_config =
a2fbb9ea 5418 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 5419 bp->link_params.ext_phy_config =
a2fbb9ea
ET
5420 SHMEM_RD(bp,
5421 dev_info.port_hw_config[port].external_phy_config);
c18487ee 5422 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
5423 SHMEM_RD(bp,
5424 dev_info.port_hw_config[port].speed_capability_mask);
5425
5426 bp->link_config =
5427 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
5428
c18487ee
YR
5429 BNX2X_DEV_INFO("serdes_config (%08x) lane_config (%08x)\n"
5430 KERN_INFO " ext_phy_config (%08x) speed_cap_mask (%08x)"
5431 " link_config (%08x)\n",
5432 bp->link_params.serdes_config,
5433 bp->link_params.lane_config,
5434 bp->link_params.ext_phy_config,
5435 bp->link_params.speed_cap_mask,
5436 bp->link_config);
a2fbb9ea 5437
c18487ee
YR
5438 bp->link_params.switch_cfg = (bp->link_config &
5439 PORT_FEATURE_CONNECTED_SWITCH_MASK);
5440 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
5441
5442 bnx2x_link_settings_requested(bp);
5443
5444 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
5445 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
5446 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
5447 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
5448 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
5449 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
5450 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
5451 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
5452 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
5453 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
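/* Worked example (editor's note): with mac_upper = 0x00001a2b and
 * mac_lower = 0x3c4d5e6f, the byte extraction above assembles the
 * station address 1a:2b:3c:4d:5e:6f.
 */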
a2fbb9ea 5454
a2fbb9ea
ET
5455
5456
5457 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
5458 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
5459 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
5460 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
5461
5462 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
5463 val, val2, val3, val4);
5464
5465 /* bc ver */
5466 if (!nomcp) {
5467 bp->bc_ver = val = ((SHMEM_RD(bp, dev_info.bc_rev)) >> 8);
5468 BNX2X_DEV_INFO("bc_ver %X\n", val);
5469 if (val < BNX2X_BC_VER) {
 5470 /* for now we only warn;
 5471 * later we might need to enforce this */
5472 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
5473 " please upgrade BC\n", BNX2X_BC_VER, val);
5474 }
5475 } else {
5476 bp->bc_ver = 0;
5477 }
5478
5479 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5480 bp->flash_size = (NVRAM_1MB_SIZE << (val & MCPR_NVM_CFG4_FLASH_SIZE));
5481 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5482 bp->flash_size, bp->flash_size);
5483
5484 return;
5485
5486set_mac: /* only supposed to happen on emulation/FPGA */
f1410647
ET
5487 BNX2X_ERR("warning rendom MAC workaround active\n");
5488 random_ether_addr(bp->dev->dev_addr);
a2fbb9ea
ET
5489 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, 6);
5490
5491}
5492
5493/*
5494 * ethtool service functions
5495 */
5496
5497/* All ethtool functions called with rtnl_lock */
5498
5499static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5500{
5501 struct bnx2x *bp = netdev_priv(dev);
5502
5503 cmd->supported = bp->supported;
5504 cmd->advertising = bp->advertising;
5505
5506 if (netif_carrier_ok(dev)) {
c18487ee
YR
5507 cmd->speed = bp->link_vars.line_speed;
5508 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 5509 } else {
c18487ee
YR
5510 cmd->speed = bp->link_params.req_line_speed;
5511 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea
ET
5512 }
5513
c18487ee
YR
5514 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
5515 u32 ext_phy_type =
5516 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
5517
5518 switch (ext_phy_type) {
5519 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
5520 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
5521 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
5522 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 5523 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
5524 cmd->port = PORT_FIBRE;
5525 break;
5526
5527 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5528 cmd->port = PORT_TP;
5529 break;
5530
c18487ee
YR
5531 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
5532 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
5533 bp->link_params.ext_phy_config);
5534 break;
5535
f1410647
ET
5536 default:
5537 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
5538 bp->link_params.ext_phy_config);
5539 break;
f1410647
ET
5540 }
5541 } else
a2fbb9ea 5542 cmd->port = PORT_TP;
a2fbb9ea
ET
5543
5544 cmd->phy_address = bp->phy_addr;
5545 cmd->transceiver = XCVR_INTERNAL;
5546
c18487ee 5547 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 5548 cmd->autoneg = AUTONEG_ENABLE;
f1410647 5549 else
a2fbb9ea 5550 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
5551
5552 cmd->maxtxpkt = 0;
5553 cmd->maxrxpkt = 0;
5554
5555 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
5556 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
5557 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
5558 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
5559 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
5560 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
5561 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
5562
5563 return 0;
5564}
5565
5566static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5567{
5568 struct bnx2x *bp = netdev_priv(dev);
5569 u32 advertising;
5570
5571 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
5572 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
5573 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
5574 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
5575 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
5576 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
5577 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
5578
a2fbb9ea 5579 if (cmd->autoneg == AUTONEG_ENABLE) {
f1410647
ET
5580 if (!(bp->supported & SUPPORTED_Autoneg)) {
 5581 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 5582 return -EINVAL;
f1410647 5583 }
a2fbb9ea
ET
5584
5585 /* advertise the requested speed and duplex if supported */
5586 cmd->advertising &= bp->supported;
5587
c18487ee
YR
5588 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
5589 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea
ET
5590 bp->advertising |= (ADVERTISED_Autoneg | cmd->advertising);
5591
5592 } else { /* forced speed */
5593 /* advertise the requested speed and duplex if supported */
5594 switch (cmd->speed) {
5595 case SPEED_10:
5596 if (cmd->duplex == DUPLEX_FULL) {
f1410647
ET
5597 if (!(bp->supported &
5598 SUPPORTED_10baseT_Full)) {
5599 DP(NETIF_MSG_LINK,
5600 "10M full not supported\n");
a2fbb9ea 5601 return -EINVAL;
f1410647 5602 }
a2fbb9ea
ET
5603
5604 advertising = (ADVERTISED_10baseT_Full |
5605 ADVERTISED_TP);
5606 } else {
f1410647
ET
5607 if (!(bp->supported &
5608 SUPPORTED_10baseT_Half)) {
5609 DP(NETIF_MSG_LINK,
5610 "10M half not supported\n");
a2fbb9ea 5611 return -EINVAL;
f1410647 5612 }
a2fbb9ea
ET
5613
5614 advertising = (ADVERTISED_10baseT_Half |
5615 ADVERTISED_TP);
5616 }
5617 break;
5618
5619 case SPEED_100:
5620 if (cmd->duplex == DUPLEX_FULL) {
5621 if (!(bp->supported &
f1410647
ET
5622 SUPPORTED_100baseT_Full)) {
5623 DP(NETIF_MSG_LINK,
5624 "100M full not supported\n");
a2fbb9ea 5625 return -EINVAL;
f1410647 5626 }
a2fbb9ea
ET
5627
5628 advertising = (ADVERTISED_100baseT_Full |
5629 ADVERTISED_TP);
5630 } else {
5631 if (!(bp->supported &
f1410647
ET
5632 SUPPORTED_100baseT_Half)) {
5633 DP(NETIF_MSG_LINK,
5634 "100M half not supported\n");
a2fbb9ea 5635 return -EINVAL;
f1410647 5636 }
a2fbb9ea
ET
5637
5638 advertising = (ADVERTISED_100baseT_Half |
5639 ADVERTISED_TP);
5640 }
5641 break;
5642
5643 case SPEED_1000:
f1410647
ET
5644 if (cmd->duplex != DUPLEX_FULL) {
5645 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 5646 return -EINVAL;
f1410647 5647 }
a2fbb9ea 5648
f1410647
ET
5649 if (!(bp->supported & SUPPORTED_1000baseT_Full)) {
5650 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 5651 return -EINVAL;
f1410647 5652 }
a2fbb9ea
ET
5653
5654 advertising = (ADVERTISED_1000baseT_Full |
5655 ADVERTISED_TP);
5656 break;
5657
5658 case SPEED_2500:
f1410647
ET
5659 if (cmd->duplex != DUPLEX_FULL) {
5660 DP(NETIF_MSG_LINK,
5661 "2.5G half not supported\n");
a2fbb9ea 5662 return -EINVAL;
f1410647 5663 }
a2fbb9ea 5664
f1410647
ET
5665 if (!(bp->supported & SUPPORTED_2500baseX_Full)) {
5666 DP(NETIF_MSG_LINK,
5667 "2.5G full not supported\n");
a2fbb9ea 5668 return -EINVAL;
f1410647 5669 }
a2fbb9ea 5670
f1410647 5671 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
5672 ADVERTISED_TP);
5673 break;
5674
5675 case SPEED_10000:
f1410647
ET
5676 if (cmd->duplex != DUPLEX_FULL) {
5677 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 5678 return -EINVAL;
f1410647 5679 }
a2fbb9ea 5680
f1410647
ET
5681 if (!(bp->supported & SUPPORTED_10000baseT_Full)) {
5682 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 5683 return -EINVAL;
f1410647 5684 }
a2fbb9ea
ET
5685
5686 advertising = (ADVERTISED_10000baseT_Full |
5687 ADVERTISED_FIBRE);
5688 break;
5689
5690 default:
f1410647 5691 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
5692 return -EINVAL;
5693 }
5694
c18487ee
YR
5695 bp->link_params.req_line_speed = cmd->speed;
5696 bp->link_params.req_duplex = cmd->duplex;
a2fbb9ea
ET
5697 bp->advertising = advertising;
5698 }
5699
c18487ee 5700 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 5701 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 5702 bp->link_params.req_line_speed, bp->link_params.req_duplex,
a2fbb9ea
ET
5703 bp->advertising);
5704
5705 bnx2x_stop_stats(bp);
c18487ee 5706 bnx2x_link_set(bp);
a2fbb9ea
ET
5707
5708 return 0;
5709}
5710
c18487ee
YR
5711#define PHY_FW_VER_LEN 10
5712
a2fbb9ea
ET
5713static void bnx2x_get_drvinfo(struct net_device *dev,
5714 struct ethtool_drvinfo *info)
5715{
5716 struct bnx2x *bp = netdev_priv(dev);
c18487ee 5717 char phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
5718
5719 strcpy(info->driver, DRV_MODULE_NAME);
5720 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
5721
5722 phy_fw_ver[0] = '\0';
5723 bnx2x_phy_hw_lock(bp);
5724 bnx2x_get_ext_phy_fw_version(&bp->link_params,
5725 (bp->state != BNX2X_STATE_CLOSED),
5726 phy_fw_ver, PHY_FW_VER_LEN);
5727 bnx2x_phy_hw_unlock(bp);
5728
5729 snprintf(info->fw_version, 32, "%d.%d.%d:%d BC:%x%s%s",
a2fbb9ea 5730 BCM_5710_FW_MAJOR_VERSION, BCM_5710_FW_MINOR_VERSION,
c18487ee
YR
5731 BCM_5710_FW_REVISION_VERSION,
5732 BCM_5710_FW_COMPILE_FLAGS, bp->bc_ver,
5733 ((phy_fw_ver[0] != '\0')? " PHY:":""), phy_fw_ver);
a2fbb9ea
ET
5734 strcpy(info->bus_info, pci_name(bp->pdev));
5735 info->n_stats = BNX2X_NUM_STATS;
5736 info->testinfo_len = BNX2X_NUM_TESTS;
5737 info->eedump_len = bp->flash_size;
5738 info->regdump_len = 0;
5739}
5740
5741static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5742{
5743 struct bnx2x *bp = netdev_priv(dev);
5744
5745 if (bp->flags & NO_WOL_FLAG) {
5746 wol->supported = 0;
5747 wol->wolopts = 0;
5748 } else {
5749 wol->supported = WAKE_MAGIC;
5750 if (bp->wol)
5751 wol->wolopts = WAKE_MAGIC;
5752 else
5753 wol->wolopts = 0;
5754 }
5755 memset(&wol->sopass, 0, sizeof(wol->sopass));
5756}
5757
5758static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5759{
5760 struct bnx2x *bp = netdev_priv(dev);
5761
5762 if (wol->wolopts & ~WAKE_MAGIC)
5763 return -EINVAL;
5764
5765 if (wol->wolopts & WAKE_MAGIC) {
5766 if (bp->flags & NO_WOL_FLAG)
5767 return -EINVAL;
5768
5769 bp->wol = 1;
5770 } else {
5771 bp->wol = 0;
5772 }
5773 return 0;
5774}
5775
5776static u32 bnx2x_get_msglevel(struct net_device *dev)
5777{
5778 struct bnx2x *bp = netdev_priv(dev);
5779
5780 return bp->msglevel;
5781}
5782
5783static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
5784{
5785 struct bnx2x *bp = netdev_priv(dev);
5786
5787 if (capable(CAP_NET_ADMIN))
5788 bp->msglevel = level;
5789}
5790
5791static int bnx2x_nway_reset(struct net_device *dev)
5792{
5793 struct bnx2x *bp = netdev_priv(dev);
5794
5795 if (bp->state != BNX2X_STATE_OPEN) {
5796 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
5797 return -EAGAIN;
5798 }
5799
5800 bnx2x_stop_stats(bp);
c18487ee 5801 bnx2x_link_set(bp);
a2fbb9ea
ET
5802
5803 return 0;
5804}
5805
5806static int bnx2x_get_eeprom_len(struct net_device *dev)
5807{
5808 struct bnx2x *bp = netdev_priv(dev);
5809
5810 return bp->flash_size;
5811}
5812
5813static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
5814{
5815 int port = bp->port;
5816 int count, i;
5817 u32 val = 0;
5818
5819 /* adjust timeout for emulation/FPGA */
5820 count = NVRAM_TIMEOUT_COUNT;
5821 if (CHIP_REV_IS_SLOW(bp))
5822 count *= 100;
5823
5824 /* request access to nvram interface */
5825 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
5826 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
5827
5828 for (i = 0; i < count*10; i++) {
5829 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
5830 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
5831 break;
5832
5833 udelay(5);
5834 }
5835
5836 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
5837 DP(NETIF_MSG_NVM, "cannot get access to nvram interface\n");
5838 return -EBUSY;
5839 }
5840
5841 return 0;
5842}
5843
5844static int bnx2x_release_nvram_lock(struct bnx2x *bp)
5845{
5846 int port = bp->port;
5847 int count, i;
5848 u32 val = 0;
5849
5850 /* adjust timeout for emulation/FPGA */
5851 count = NVRAM_TIMEOUT_COUNT;
5852 if (CHIP_REV_IS_SLOW(bp))
5853 count *= 100;
5854
5855 /* relinquish nvram interface */
5856 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
5857 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
5858
5859 for (i = 0; i < count*10; i++) {
5860 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
5861 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
5862 break;
5863
5864 udelay(5);
5865 }
5866
5867 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
5868 DP(NETIF_MSG_NVM, "cannot free access to nvram interface\n");
5869 return -EBUSY;
5870 }
5871
5872 return 0;
5873}
5874
5875static void bnx2x_enable_nvram_access(struct bnx2x *bp)
5876{
5877 u32 val;
5878
5879 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5880
5881 /* enable both bits, even on read */
5882 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5883 (val | MCPR_NVM_ACCESS_ENABLE_EN |
5884 MCPR_NVM_ACCESS_ENABLE_WR_EN));
5885}
5886
5887static void bnx2x_disable_nvram_access(struct bnx2x *bp)
5888{
5889 u32 val;
5890
5891 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
5892
5893 /* disable both bits, even after read */
5894 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
5895 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
5896 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
5897}
5898
5899static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
5900 u32 cmd_flags)
5901{
f1410647 5902 int count, i, rc;
a2fbb9ea
ET
5903 u32 val;
5904
5905 /* build the command word */
5906 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
5907
5908 /* need to clear DONE bit separately */
5909 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
5910
5911 /* address of the NVRAM to read from */
5912 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
5913 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
5914
5915 /* issue a read command */
5916 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
5917
5918 /* adjust timeout for emulation/FPGA */
5919 count = NVRAM_TIMEOUT_COUNT;
5920 if (CHIP_REV_IS_SLOW(bp))
5921 count *= 100;
5922
5923 /* wait for completion */
5924 *ret_val = 0;
5925 rc = -EBUSY;
5926 for (i = 0; i < count; i++) {
5927 udelay(5);
5928 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
5929
5930 if (val & MCPR_NVM_COMMAND_DONE) {
5931 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
5932 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
5933 /* we read nvram data in cpu order
5934 * but ethtool sees it as an array of bytes
5935 * converting to big-endian will do the work */
5936 val = cpu_to_be32(val);
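/* e.g. (editor's note) a dword read as 0x11223344 in CPU order
 * ends up as the byte sequence 11 22 33 44 on any host, matching
 * the flat byte-array view that ethtool presents to user space.
 */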
5937 *ret_val = val;
5938 rc = 0;
5939 break;
5940 }
5941 }
5942
5943 return rc;
5944}
5945
5946static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
5947 int buf_size)
5948{
5949 int rc;
5950 u32 cmd_flags;
5951 u32 val;
5952
5953 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
5954 DP(NETIF_MSG_NVM,
c14423fe 5955 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
5956 offset, buf_size);
5957 return -EINVAL;
5958 }
5959
5960 if (offset + buf_size > bp->flash_size) {
c14423fe 5961 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
5962 " buf_size (0x%x) > flash_size (0x%x)\n",
5963 offset, buf_size, bp->flash_size);
5964 return -EINVAL;
5965 }
5966
5967 /* request access to nvram interface */
5968 rc = bnx2x_acquire_nvram_lock(bp);
5969 if (rc)
5970 return rc;
5971
5972 /* enable access to nvram interface */
5973 bnx2x_enable_nvram_access(bp);
5974
5975 /* read the first word(s) */
5976 cmd_flags = MCPR_NVM_COMMAND_FIRST;
5977 while ((buf_size > sizeof(u32)) && (rc == 0)) {
5978 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
5979 memcpy(ret_buf, &val, 4);
5980
5981 /* advance to the next dword */
5982 offset += sizeof(u32);
5983 ret_buf += sizeof(u32);
5984 buf_size -= sizeof(u32);
5985 cmd_flags = 0;
5986 }
5987
5988 if (rc == 0) {
5989 cmd_flags |= MCPR_NVM_COMMAND_LAST;
5990 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
5991 memcpy(ret_buf, &val, 4);
5992 }
5993
5994 /* disable access to nvram interface */
5995 bnx2x_disable_nvram_access(bp);
5996 bnx2x_release_nvram_lock(bp);
5997
5998 return rc;
5999}
6000
6001static int bnx2x_get_eeprom(struct net_device *dev,
6002 struct ethtool_eeprom *eeprom, u8 *eebuf)
6003{
6004 struct bnx2x *bp = netdev_priv(dev);
6005 int rc;
6006
6007 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
6008 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
6009 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
6010 eeprom->len, eeprom->len);
6011
6012 /* parameters already validated in ethtool_get_eeprom */
6013
6014 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6015
6016 return rc;
6017}
6018
6019static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
6020 u32 cmd_flags)
6021{
f1410647 6022 int count, i, rc;
a2fbb9ea
ET
6023
6024 /* build the command word */
6025 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
6026
6027 /* need to clear DONE bit separately */
6028 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
6029
6030 /* write the data */
6031 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
6032
6033 /* address of the NVRAM to write to */
6034 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
6035 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
6036
6037 /* issue the write command */
6038 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
6039
6040 /* adjust timeout for emulation/FPGA */
6041 count = NVRAM_TIMEOUT_COUNT;
6042 if (CHIP_REV_IS_SLOW(bp))
6043 count *= 100;
6044
6045 /* wait for completion */
6046 rc = -EBUSY;
6047 for (i = 0; i < count; i++) {
6048 udelay(5);
6049 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
6050 if (val & MCPR_NVM_COMMAND_DONE) {
6051 rc = 0;
6052 break;
6053 }
6054 }
6055
6056 return rc;
6057}
6058
f1410647 6059#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
a2fbb9ea
ET
6060
6061static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
6062 int buf_size)
6063{
6064 int rc;
6065 u32 cmd_flags;
6066 u32 align_offset;
6067 u32 val;
6068
6069 if (offset + buf_size > bp->flash_size) {
c14423fe 6070 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
6071 " buf_size (0x%x) > flash_size (0x%x)\n",
6072 offset, buf_size, bp->flash_size);
6073 return -EINVAL;
6074 }
6075
6076 /* request access to nvram interface */
6077 rc = bnx2x_acquire_nvram_lock(bp);
6078 if (rc)
6079 return rc;
6080
6081 /* enable access to nvram interface */
6082 bnx2x_enable_nvram_access(bp);
6083
6084 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
6085 align_offset = (offset & ~0x03);
6086 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
6087
6088 if (rc == 0) {
6089 val &= ~(0xff << BYTE_OFFSET(offset));
6090 val |= (*data_buf << BYTE_OFFSET(offset));
6091
6092 /* nvram data is returned as an array of bytes
6093 * convert it back to cpu order */
6094 val = be32_to_cpu(val);
6095
6096 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
6097
6098 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
6099 cmd_flags);
6100 }
6101
6102 /* disable access to nvram interface */
6103 bnx2x_disable_nvram_access(bp);
6104 bnx2x_release_nvram_lock(bp);
6105
6106 return rc;
6107}
6108
6109static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
6110 int buf_size)
6111{
6112 int rc;
6113 u32 cmd_flags;
6114 u32 val;
6115 u32 written_so_far;
6116
6117 if (buf_size == 1) { /* ethtool */
6118 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
6119 }
6120
6121 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
6122 DP(NETIF_MSG_NVM,
c14423fe 6123 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
a2fbb9ea
ET
6124 offset, buf_size);
6125 return -EINVAL;
6126 }
6127
6128 if (offset + buf_size > bp->flash_size) {
c14423fe 6129 DP(NETIF_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea
ET
6130 " buf_size (0x%x) > flash_size (0x%x)\n",
6131 offset, buf_size, bp->flash_size);
6132 return -EINVAL;
6133 }
6134
6135 /* request access to nvram interface */
6136 rc = bnx2x_acquire_nvram_lock(bp);
6137 if (rc)
6138 return rc;
6139
6140 /* enable access to nvram interface */
6141 bnx2x_enable_nvram_access(bp);
6142
6143 written_so_far = 0;
6144 cmd_flags = MCPR_NVM_COMMAND_FIRST;
6145 while ((written_so_far < buf_size) && (rc == 0)) {
6146 if (written_so_far == (buf_size - sizeof(u32)))
6147 cmd_flags |= MCPR_NVM_COMMAND_LAST;
6148 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
6149 cmd_flags |= MCPR_NVM_COMMAND_LAST;
6150 else if ((offset % NVRAM_PAGE_SIZE) == 0)
6151 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
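/* Worked example (editor's note, assuming NVRAM_PAGE_SIZE is 256):
 * a write covering offsets 0xf8-0x107 sets LAST at offset 0xfc
 * (0xfc + 4 is page aligned) and FIRST again at 0x100, so every
 * flash page gets its own FIRST/LAST bracket.
 */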
6152
6153 memcpy(&val, data_buf, 4);
6154 DP(NETIF_MSG_NVM, "val 0x%08x\n", val);
6155
6156 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
6157
6158 /* advance to the next dword */
6159 offset += sizeof(u32);
6160 data_buf += sizeof(u32);
6161 written_so_far += sizeof(u32);
6162 cmd_flags = 0;
6163 }
6164
6165 /* disable access to nvram interface */
6166 bnx2x_disable_nvram_access(bp);
6167 bnx2x_release_nvram_lock(bp);
6168
6169 return rc;
6170}
6171
6172static int bnx2x_set_eeprom(struct net_device *dev,
6173 struct ethtool_eeprom *eeprom, u8 *eebuf)
6174{
6175 struct bnx2x *bp = netdev_priv(dev);
6176 int rc;
6177
6178 DP(NETIF_MSG_NVM, "ethtool_eeprom: cmd %d\n"
6179 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
6180 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
6181 eeprom->len, eeprom->len);
6182
6183 /* parameters already validated in ethtool_set_eeprom */
6184
c18487ee
YR
6185 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
6186 if (eeprom->magic == 0x00504859) {
6187
6188 bnx2x_phy_hw_lock(bp);
6189 rc = bnx2x_flash_download(bp, bp->port,
6190 bp->link_params.ext_phy_config,
6191 (bp->state != BNX2X_STATE_CLOSED),
6192 eebuf, eeprom->len);
6193 rc |= bnx2x_link_reset(&bp->link_params,
6194 &bp->link_vars);
6195 rc |= bnx2x_phy_init(&bp->link_params,
6196 &bp->link_vars);
6197 bnx2x_phy_hw_unlock(bp);
6198
6199 } else
6200 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
a2fbb9ea
ET
6201
6202 return rc;
6203}
6204
6205static int bnx2x_get_coalesce(struct net_device *dev,
6206 struct ethtool_coalesce *coal)
6207{
6208 struct bnx2x *bp = netdev_priv(dev);
6209
6210 memset(coal, 0, sizeof(struct ethtool_coalesce));
6211
6212 coal->rx_coalesce_usecs = bp->rx_ticks;
6213 coal->tx_coalesce_usecs = bp->tx_ticks;
6214 coal->stats_block_coalesce_usecs = bp->stats_ticks;
6215
6216 return 0;
6217}
6218
6219static int bnx2x_set_coalesce(struct net_device *dev,
6220 struct ethtool_coalesce *coal)
6221{
6222 struct bnx2x *bp = netdev_priv(dev);
6223
6224 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6225 if (bp->rx_ticks > 3000)
6226 bp->rx_ticks = 3000;
6227
6228 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6229 if (bp->tx_ticks > 0x3000)
6230 bp->tx_ticks = 0x3000;
6231
6232 bp->stats_ticks = coal->stats_block_coalesce_usecs;
6233 if (bp->stats_ticks > 0xffff00)
6234 bp->stats_ticks = 0xffff00;
6235 bp->stats_ticks &= 0xffff00;
6236
6237 if (netif_running(bp->dev))
6238 bnx2x_update_coalesce(bp);
6239
6240 return 0;
6241}
6242
6243static void bnx2x_get_ringparam(struct net_device *dev,
6244 struct ethtool_ringparam *ering)
6245{
6246 struct bnx2x *bp = netdev_priv(dev);
6247
6248 ering->rx_max_pending = MAX_RX_AVAIL;
6249 ering->rx_mini_max_pending = 0;
6250 ering->rx_jumbo_max_pending = 0;
6251
6252 ering->rx_pending = bp->rx_ring_size;
6253 ering->rx_mini_pending = 0;
6254 ering->rx_jumbo_pending = 0;
6255
6256 ering->tx_max_pending = MAX_TX_AVAIL;
6257 ering->tx_pending = bp->tx_ring_size;
6258}
6259
6260static int bnx2x_set_ringparam(struct net_device *dev,
6261 struct ethtool_ringparam *ering)
6262{
6263 struct bnx2x *bp = netdev_priv(dev);
6264
6265 if ((ering->rx_pending > MAX_RX_AVAIL) ||
6266 (ering->tx_pending > MAX_TX_AVAIL) ||
6267 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
6268 return -EINVAL;
6269
6270 bp->rx_ring_size = ering->rx_pending;
6271 bp->tx_ring_size = ering->tx_pending;
6272
6273 if (netif_running(bp->dev)) {
6274 bnx2x_nic_unload(bp, 0);
6275 bnx2x_nic_load(bp, 0);
6276 }
6277
6278 return 0;
6279}
6280
6281static void bnx2x_get_pauseparam(struct net_device *dev,
6282 struct ethtool_pauseparam *epause)
6283{
6284 struct bnx2x *bp = netdev_priv(dev);
6285
c18487ee
YR
6286 epause->autoneg = (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO) &&
6287 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
6288
6289 epause->rx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_RX) ==
6290 FLOW_CTRL_RX);
6291 epause->tx_pause = ((bp->link_vars.flow_ctrl & FLOW_CTRL_TX) ==
6292 FLOW_CTRL_TX);
a2fbb9ea
ET
6293
6294 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
6295 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
6296 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
6297}
6298
6299static int bnx2x_set_pauseparam(struct net_device *dev,
6300 struct ethtool_pauseparam *epause)
6301{
6302 struct bnx2x *bp = netdev_priv(dev);
6303
6304 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
6305 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
6306 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
6307
c18487ee 6308 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
a2fbb9ea 6309
f1410647 6310 if (epause->rx_pause)
c18487ee
YR
6311 bp->link_params.req_flow_ctrl |= FLOW_CTRL_RX;
6312
f1410647 6313 if (epause->tx_pause)
c18487ee
YR
6314 bp->link_params.req_flow_ctrl |= FLOW_CTRL_TX;
6315
6316 if (bp->link_params.req_flow_ctrl == FLOW_CTRL_AUTO)
6317 bp->link_params.req_flow_ctrl = FLOW_CTRL_NONE;
a2fbb9ea 6318
c18487ee
YR
6319 if (epause->autoneg) {
6320 if (!(bp->supported & SUPPORTED_Autoneg)) {
6321 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
6322 return -EINVAL;
6323 }
a2fbb9ea 6324
c18487ee
YR
6325 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
6326 bp->link_params.req_flow_ctrl = FLOW_CTRL_AUTO;
6327 }
a2fbb9ea 6328
c18487ee
YR
6329 DP(NETIF_MSG_LINK,
6330 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
a2fbb9ea 6331 bnx2x_stop_stats(bp);
c18487ee 6332 bnx2x_link_set(bp);
a2fbb9ea
ET
6333
6334 return 0;
6335}
6336
6337static u32 bnx2x_get_rx_csum(struct net_device *dev)
6338{
6339 struct bnx2x *bp = netdev_priv(dev);
6340
6341 return bp->rx_csum;
6342}
6343
6344static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
6345{
6346 struct bnx2x *bp = netdev_priv(dev);
6347
6348 bp->rx_csum = data;
6349 return 0;
6350}
6351
6352static int bnx2x_set_tso(struct net_device *dev, u32 data)
6353{
6354 if (data)
6355 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
6356 else
6357 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
6358 return 0;
6359}
6360
6361static struct {
6362 char string[ETH_GSTRING_LEN];
6363} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
6364 { "MC Errors (online)" }
6365};
6366
6367static int bnx2x_self_test_count(struct net_device *dev)
6368{
6369 return BNX2X_NUM_TESTS;
6370}
6371
6372static void bnx2x_self_test(struct net_device *dev,
6373 struct ethtool_test *etest, u64 *buf)
6374{
6375 struct bnx2x *bp = netdev_priv(dev);
6376 int stats_state;
6377
6378 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
6379
6380 if (bp->state != BNX2X_STATE_OPEN) {
6381 DP(NETIF_MSG_PROBE, "state is %x, returning\n", bp->state);
6382 return;
6383 }
6384
6385 stats_state = bp->stats_state;
6386 bnx2x_stop_stats(bp);
6387
6388 if (bnx2x_mc_assert(bp) != 0) {
6389 buf[0] = 1;
6390 etest->flags |= ETH_TEST_FL_FAILED;
6391 }
6392
6393#ifdef BNX2X_EXTRA_DEBUG
6394 bnx2x_panic_dump(bp);
6395#endif
6396 bp->stats_state = stats_state;
6397}
6398
6399static struct {
6400 char string[ETH_GSTRING_LEN];
6401} bnx2x_stats_str_arr[BNX2X_NUM_STATS] = {
0e39e645
ET
6402 { "rx_bytes"},
6403 { "rx_error_bytes"},
6404 { "tx_bytes"},
6405 { "tx_error_bytes"},
6406 { "rx_ucast_packets"},
6407 { "rx_mcast_packets"},
6408 { "rx_bcast_packets"},
6409 { "tx_ucast_packets"},
6410 { "tx_mcast_packets"},
6411 { "tx_bcast_packets"},
6412 { "tx_mac_errors"}, /* 10 */
6413 { "tx_carrier_errors"},
6414 { "rx_crc_errors"},
6415 { "rx_align_errors"},
6416 { "tx_single_collisions"},
6417 { "tx_multi_collisions"},
6418 { "tx_deferred"},
6419 { "tx_excess_collisions"},
6420 { "tx_late_collisions"},
6421 { "tx_total_collisions"},
6422 { "rx_fragments"}, /* 20 */
6423 { "rx_jabbers"},
6424 { "rx_undersize_packets"},
6425 { "rx_oversize_packets"},
6426 { "rx_xon_frames"},
6427 { "rx_xoff_frames"},
6428 { "tx_xon_frames"},
6429 { "tx_xoff_frames"},
6430 { "rx_mac_ctrl_frames"},
6431 { "rx_filtered_packets"},
6432 { "rx_discards"}, /* 30 */
6433 { "brb_discard"},
6434 { "brb_truncate"},
6435 { "xxoverflow"}
a2fbb9ea
ET
6436};
6437
6438#define STATS_OFFSET32(offset_name) \
6439 (offsetof(struct bnx2x_eth_stats, offset_name) / 4)
6440
6441static unsigned long bnx2x_stats_offset_arr[BNX2X_NUM_STATS] = {
0e39e645
ET
6442 STATS_OFFSET32(total_bytes_received_hi),
6443 STATS_OFFSET32(stat_IfHCInBadOctets_hi),
6444 STATS_OFFSET32(total_bytes_transmitted_hi),
6445 STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
6446 STATS_OFFSET32(total_unicast_packets_received_hi),
6447 STATS_OFFSET32(total_multicast_packets_received_hi),
6448 STATS_OFFSET32(total_broadcast_packets_received_hi),
6449 STATS_OFFSET32(total_unicast_packets_transmitted_hi),
6450 STATS_OFFSET32(total_multicast_packets_transmitted_hi),
6451 STATS_OFFSET32(total_broadcast_packets_transmitted_hi),
6452 STATS_OFFSET32(stat_Dot3statsInternalMacTransmitErrors), /* 10 */
6453 STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
6454 STATS_OFFSET32(crc_receive_errors),
6455 STATS_OFFSET32(alignment_errors),
6456 STATS_OFFSET32(single_collision_transmit_frames),
6457 STATS_OFFSET32(multiple_collision_transmit_frames),
6458 STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
6459 STATS_OFFSET32(excessive_collision_frames),
6460 STATS_OFFSET32(late_collision_frames),
6461 STATS_OFFSET32(number_of_bugs_found_in_stats_spec),
6462 STATS_OFFSET32(runt_packets_received), /* 20 */
6463 STATS_OFFSET32(jabber_packets_received),
6464 STATS_OFFSET32(error_runt_packets_received),
6465 STATS_OFFSET32(error_jabber_packets_received),
6466 STATS_OFFSET32(pause_xon_frames_received),
6467 STATS_OFFSET32(pause_xoff_frames_received),
6468 STATS_OFFSET32(pause_xon_frames_transmitted),
6469 STATS_OFFSET32(pause_xoff_frames_transmitted),
6470 STATS_OFFSET32(control_frames_received),
6471 STATS_OFFSET32(mac_filter_discard),
6472 STATS_OFFSET32(no_buff_discard), /* 30 */
6473 STATS_OFFSET32(brb_discard),
6474 STATS_OFFSET32(brb_truncate_discard),
6475 STATS_OFFSET32(xxoverflow_discard)
a2fbb9ea
ET
6476};
6477
6478static u8 bnx2x_stats_len_arr[BNX2X_NUM_STATS] = {
6479 8, 0, 8, 0, 8, 8, 8, 8, 8, 8,
6480 4, 0, 4, 4, 4, 4, 4, 4, 4, 4,
6481 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
0e39e645 6482 4, 4, 4, 4
a2fbb9ea
ET
6483};
6484
6485static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
6486{
6487 switch (stringset) {
6488 case ETH_SS_STATS:
6489 memcpy(buf, bnx2x_stats_str_arr, sizeof(bnx2x_stats_str_arr));
6490 break;
6491
6492 case ETH_SS_TEST:
6493 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
6494 break;
6495 }
6496}
6497
6498static int bnx2x_get_stats_count(struct net_device *dev)
6499{
6500 return BNX2X_NUM_STATS;
6501}
6502
6503static void bnx2x_get_ethtool_stats(struct net_device *dev,
6504 struct ethtool_stats *stats, u64 *buf)
6505{
6506 struct bnx2x *bp = netdev_priv(dev);
6507 u32 *hw_stats = (u32 *)bnx2x_sp_check(bp, eth_stats);
6508 int i;
6509
6510 for (i = 0; i < BNX2X_NUM_STATS; i++) {
6511 if (bnx2x_stats_len_arr[i] == 0) {
6512 /* skip this counter */
6513 buf[i] = 0;
6514 continue;
6515 }
6516 if (!hw_stats) {
6517 buf[i] = 0;
6518 continue;
6519 }
6520 if (bnx2x_stats_len_arr[i] == 4) {
6521 /* 4-byte counter */
6522 buf[i] = (u64) *(hw_stats + bnx2x_stats_offset_arr[i]);
6523 continue;
6524 }
6525 /* 8-byte counter */
6526 buf[i] = HILO_U64(*(hw_stats + bnx2x_stats_offset_arr[i]),
6527 *(hw_stats + bnx2x_stats_offset_arr[i] + 1));
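/* Editor's note: 8-byte counters are stored as two consecutive
 * 32-bit words, hi word first; HILO_U64() (from bnx2x.h) recombines
 * them, essentially ((u64)hi << 32) + lo.
 */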
6528 }
6529}
6530
6531static int bnx2x_phys_id(struct net_device *dev, u32 data)
6532{
6533 struct bnx2x *bp = netdev_priv(dev);
6534 int i;
6535
6536 if (data == 0)
6537 data = 2;
6538
6539 for (i = 0; i < (data * 2); i++) {
c18487ee
YR
6540 if ((i % 2) == 0)
6541 bnx2x_set_led(bp, bp->port, LED_MODE_OPER, SPEED_1000,
6542 bp->link_params.hw_led_mode,
6543 bp->link_params.chip_id);
6544 else
6545 bnx2x_set_led(bp, bp->port, LED_MODE_OFF, 0,
6546 bp->link_params.hw_led_mode,
6547 bp->link_params.chip_id);
6548
a2fbb9ea
ET
6549 msleep_interruptible(500);
6550 if (signal_pending(current))
6551 break;
6552 }
6553
c18487ee
YR
6554 if (bp->link_vars.link_up)
6555 bnx2x_set_led(bp, bp->port, LED_MODE_OPER,
6556 bp->link_vars.line_speed,
6557 bp->link_params.hw_led_mode,
6558 bp->link_params.chip_id);
a2fbb9ea
ET
6559
6560 return 0;
6561}
6562
6563static struct ethtool_ops bnx2x_ethtool_ops = {
6564 .get_settings = bnx2x_get_settings,
6565 .set_settings = bnx2x_set_settings,
6566 .get_drvinfo = bnx2x_get_drvinfo,
6567 .get_wol = bnx2x_get_wol,
6568 .set_wol = bnx2x_set_wol,
6569 .get_msglevel = bnx2x_get_msglevel,
6570 .set_msglevel = bnx2x_set_msglevel,
6571 .nway_reset = bnx2x_nway_reset,
6572 .get_link = ethtool_op_get_link,
6573 .get_eeprom_len = bnx2x_get_eeprom_len,
6574 .get_eeprom = bnx2x_get_eeprom,
6575 .set_eeprom = bnx2x_set_eeprom,
6576 .get_coalesce = bnx2x_get_coalesce,
6577 .set_coalesce = bnx2x_set_coalesce,
6578 .get_ringparam = bnx2x_get_ringparam,
6579 .set_ringparam = bnx2x_set_ringparam,
6580 .get_pauseparam = bnx2x_get_pauseparam,
6581 .set_pauseparam = bnx2x_set_pauseparam,
6582 .get_rx_csum = bnx2x_get_rx_csum,
6583 .set_rx_csum = bnx2x_set_rx_csum,
6584 .get_tx_csum = ethtool_op_get_tx_csum,
6585 .set_tx_csum = ethtool_op_set_tx_csum,
6586 .get_sg = ethtool_op_get_sg,
6587 .set_sg = ethtool_op_set_sg,
6588 .get_tso = ethtool_op_get_tso,
6589 .set_tso = bnx2x_set_tso,
6590 .self_test_count = bnx2x_self_test_count,
6591 .self_test = bnx2x_self_test,
6592 .get_strings = bnx2x_get_strings,
6593 .phys_id = bnx2x_phys_id,
6594 .get_stats_count = bnx2x_get_stats_count,
6595 .get_ethtool_stats = bnx2x_get_ethtool_stats
6596};
6597
6598/* end of ethtool_ops */
6599
6600/****************************************************************************
6601* General service functions
6602****************************************************************************/
6603
6604static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
6605{
6606 u16 pmcsr;
6607
6608 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
6609
6610 switch (state) {
6611 case PCI_D0:
6612 pci_write_config_word(bp->pdev,
6613 bp->pm_cap + PCI_PM_CTRL,
6614 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
6615 PCI_PM_CTRL_PME_STATUS));
6616
6617 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
6618 /* delay required during transition out of D3hot */
6619 msleep(20);
6620 break;
6621
6622 case PCI_D3hot:
6623 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
6624 pmcsr |= 3;
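/* 3 is the D3hot encoding of the PCI_PM_CTRL_STATE_MASK field */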
6625
6626 if (bp->wol)
6627 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
6628
6629 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
6630 pmcsr);
6631
6632 /* No more memory access after this point until
6633 * device is brought back to D0.
6634 */
6635 break;
6636
6637 default:
6638 return -EINVAL;
6639 }
6640 return 0;
6641}
6642
6643/*
6644 * net_device service functions
6645 */
6646
49d66772 6647/* called with netif_tx_lock from set_multicast */
a2fbb9ea
ET
6648static void bnx2x_set_rx_mode(struct net_device *dev)
6649{
6650 struct bnx2x *bp = netdev_priv(dev);
6651 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6652
6653 DP(NETIF_MSG_IFUP, "called dev->flags = %x\n", dev->flags);
6654
6655 if (dev->flags & IFF_PROMISC)
6656 rx_mode = BNX2X_RX_MODE_PROMISC;
6657
6658 else if ((dev->flags & IFF_ALLMULTI) ||
6659 (dev->mc_count > BNX2X_MAX_MULTICAST))
6660 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6661
6662 else { /* some multicasts */
6663 int i, old, offset;
6664 struct dev_mc_list *mclist;
6665 struct mac_configuration_cmd *config =
6666 bnx2x_sp(bp, mcast_config);
6667
6668 for (i = 0, mclist = dev->mc_list;
6669 mclist && (i < dev->mc_count);
6670 i++, mclist = mclist->next) {
6671
6672 config->config_table[i].cam_entry.msb_mac_addr =
6673 swab16(*(u16 *)&mclist->dmi_addr[0]);
6674 config->config_table[i].cam_entry.middle_mac_addr =
6675 swab16(*(u16 *)&mclist->dmi_addr[2]);
6676 config->config_table[i].cam_entry.lsb_mac_addr =
6677 swab16(*(u16 *)&mclist->dmi_addr[4]);
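/* Worked example (editor's note): on a little-endian host the
 * multicast address 01:00:5e:00:00:fb is loaded as the u16s
 * 0x0001, 0x005e, 0xfb00; swab16() turns these into the chunks
 * 0x0100, 0x5e00, 0x00fb stored in the CAM entry fields.
 */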
6678 config->config_table[i].cam_entry.flags =
6679 cpu_to_le16(bp->port);
6680 config->config_table[i].target_table_entry.flags = 0;
6681 config->config_table[i].target_table_entry.
6682 client_id = 0;
6683 config->config_table[i].target_table_entry.
6684 vlan_id = 0;
6685
6686 DP(NETIF_MSG_IFUP,
6687 "setting MCAST[%d] (%04x:%04x:%04x)\n",
6688 i, config->config_table[i].cam_entry.msb_mac_addr,
6689 config->config_table[i].cam_entry.middle_mac_addr,
6690 config->config_table[i].cam_entry.lsb_mac_addr);
6691 }
6692 old = config->hdr.length_6b;
6693 if (old > i) {
6694 for (; i < old; i++) {
6695 if (CAM_IS_INVALID(config->config_table[i])) {
6696 i--; /* already invalidated */
6697 break;
6698 }
6699 /* invalidate */
6700 CAM_INVALIDATE(config->config_table[i]);
6701 }
6702 }
6703
6704 if (CHIP_REV_IS_SLOW(bp))
6705 offset = BNX2X_MAX_EMUL_MULTI*(1 + bp->port);
6706 else
6707 offset = BNX2X_MAX_MULTICAST*(1 + bp->port);
6708
6709 config->hdr.length_6b = i;
6710 config->hdr.offset = offset;
6711 config->hdr.reserved0 = 0;
6712 config->hdr.reserved1 = 0;
6713
6714 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6715 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6716 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
6717 }
6718
6719 bp->rx_mode = rx_mode;
6720 bnx2x_set_storm_rx_mode(bp);
6721}
6722
6723static int bnx2x_poll(struct napi_struct *napi, int budget)
6724{
6725 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
6726 napi);
6727 struct bnx2x *bp = fp->bp;
6728 int work_done = 0;
6729
6730#ifdef BNX2X_STOP_ON_ERROR
6731 if (unlikely(bp->panic))
6732 goto out_panic;
6733#endif
6734
6735 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
6736 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
6737 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
6738
6739 bnx2x_update_fpsb_idx(fp);
6740
6741 if (le16_to_cpu(*fp->tx_cons_sb) != fp->tx_pkt_cons)
6742 bnx2x_tx_int(fp, budget);
6743
6744
6745 if (le16_to_cpu(*fp->rx_cons_sb) != fp->rx_comp_cons)
6746 work_done = bnx2x_rx_int(fp, budget);
6747
6748
6749 rmb(); /* bnx2x_has_work() reads the status block */
6750
6751 /* must not complete if we consumed full budget */
6752 if ((work_done < budget) && !bnx2x_has_work(fp)) {
6753
6754#ifdef BNX2X_STOP_ON_ERROR
6755out_panic:
6756#endif
6757 netif_rx_complete(bp->dev, napi);
6758
6759 bnx2x_ack_sb(bp, fp->index, USTORM_ID,
6760 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
6761 bnx2x_ack_sb(bp, fp->index, CSTORM_ID,
6762 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
6763 }
6764
6765 return work_done;
6766}
6767
6768/* Called with netif_tx_lock.
6769 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
6770 * netif_wake_queue().
6771 */
6772static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
6773{
6774 struct bnx2x *bp = netdev_priv(dev);
6775 struct bnx2x_fastpath *fp;
6776 struct sw_tx_bd *tx_buf;
6777 struct eth_tx_bd *tx_bd;
6778 struct eth_tx_parse_bd *pbd = NULL;
6779 u16 pkt_prod, bd_prod;
6780 int nbd, fp_index = 0;
6781 dma_addr_t mapping;
6782
6783#ifdef BNX2X_STOP_ON_ERROR
6784 if (unlikely(bp->panic))
6785 return NETDEV_TX_BUSY;
6786#endif
6787
6788 fp_index = smp_processor_id() % (bp->num_queues);
6789
6790 fp = &bp->fp[fp_index];
 6791 if (unlikely(bnx2x_tx_avail(fp) <
6792 (skb_shinfo(skb)->nr_frags + 3))) {
 6793 bp->slowpath->eth_stats.driver_xoff++;
6794 netif_stop_queue(dev);
6795 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
6796 return NETDEV_TX_BUSY;
6797 }
6798
6799 /*
6800 This is a bit ugly. First we use one BD which we mark as start,
6801 then for TSO or xsum we have a parsing info BD,
6802 and only then we have the rest of the TSO bds.
6803 (don't forget to mark the last one as last,
6804 and to unmap only AFTER you write to the BD ...)
6805 I would like to thank DovH for this mess.
6806 */
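/* Editor's sketch of the resulting chain for a TSO packet:
 *
 *	[start BD] -> [parse BD] -> [data BD] ... [last BD]
 *
 * The start BD's nbd field counts every BD in the chain, while its
 * general_data carries the number of BDs holding headers.
 */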
6807
6808 pkt_prod = fp->tx_pkt_prod++;
6809 bd_prod = fp->tx_bd_prod;
6810 bd_prod = TX_BD(bd_prod);
6811
6812 /* get a tx_buff and first bd */
6813 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
6814 tx_bd = &fp->tx_desc_ring[bd_prod];
6815
6816 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
6817 tx_bd->general_data = (UNICAST_ADDRESS <<
6818 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
6819 tx_bd->general_data |= 1; /* header nbd */
6820
c14423fe 6821 /* remember the first bd of the packet */
a2fbb9ea
ET
6822 tx_buf->first_bd = bd_prod;
6823
6824 DP(NETIF_MSG_TX_QUEUED,
6825 "sending pkt %u @%p next_idx %u bd %u @%p\n",
6826 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
6827
6828 if (skb->ip_summed == CHECKSUM_PARTIAL) {
6829 struct iphdr *iph = ip_hdr(skb);
6830 u8 len;
6831
6832 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IP_CSUM;
6833
6834 /* turn on parsing and get a bd */
6835 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6836 pbd = (void *)&fp->tx_desc_ring[bd_prod];
6837 len = ((u8 *)iph - (u8 *)skb->data) / 2;
6838
6839 /* for now NS flag is not used in Linux */
6840 pbd->global_data = (len |
96fc1784 6841 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea
ET
6842 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
6843 pbd->ip_hlen = ip_hdrlen(skb) / 2;
6844 pbd->total_hlen = cpu_to_le16(len + pbd->ip_hlen);
6845 if (iph->protocol == IPPROTO_TCP) {
6846 struct tcphdr *th = tcp_hdr(skb);
6847
6848 tx_bd->bd_flags.as_bitfield |=
6849 ETH_TX_BD_FLAGS_TCP_CSUM;
96fc1784 6850 pbd->tcp_flags = pbd_tcp_flags(skb);
a2fbb9ea
ET
6851 pbd->total_hlen += cpu_to_le16(tcp_hdrlen(skb) / 2);
6852 pbd->tcp_pseudo_csum = swab16(th->check);
6853
6854 } else if (iph->protocol == IPPROTO_UDP) {
6855 struct udphdr *uh = udp_hdr(skb);
6856
6857 tx_bd->bd_flags.as_bitfield |=
6858 ETH_TX_BD_FLAGS_TCP_CSUM;
6859 pbd->total_hlen += cpu_to_le16(4);
6860 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
6861 pbd->cs_offset = 5; /* 10 >> 1 */
6862 pbd->tcp_pseudo_csum = 0;
 6863 /* HW bug workaround: subtract the checksum of the
 6864 * 10 bytes preceding the UDP header from the csum
 6865 */
6866 uh->check = (u16) ~csum_fold(csum_sub(uh->check,
6867 csum_partial(((u8 *)(uh)-10), 10, 0)));
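/* Editor's note: cs_offset is given in 16-bit words (5 * 2 = 10
 * bytes), so the HW apparently starts summing 10 bytes before the
 * UDP header; folding the partial checksum of those 10 bytes out
 * of uh->check above cancels the extra data in the HW's sum.
 */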
6868 }
6869 }
6870
6871 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb)) {
6872 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
6873 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
6874 } else {
6875 tx_bd->vlan = cpu_to_le16(pkt_prod);
6876 }
6877
6878 mapping = pci_map_single(bp->pdev, skb->data,
6879 skb->len, PCI_DMA_TODEVICE);
6880
6881 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6882 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6883 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL)? 1 : 2);
6884 tx_bd->nbd = cpu_to_le16(nbd);
6885 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
6886
6887 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
6888 " nbytes %d flags %x vlan %u\n",
6889 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, tx_bd->nbd,
6890 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield, tx_bd->vlan);
6891
6892 if (skb_shinfo(skb)->gso_size &&
6893 (skb->len > (bp->dev->mtu + ETH_HLEN))) {
96fc1784 6894 int hlen = 2 * le16_to_cpu(pbd->total_hlen);
a2fbb9ea
ET
6895
6896 DP(NETIF_MSG_TX_QUEUED,
6897 "TSO packet len %d hlen %d total len %d tso size %d\n",
6898 skb->len, hlen, skb_headlen(skb),
6899 skb_shinfo(skb)->gso_size);
6900
6901 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
6902
6903 if (tx_bd->nbytes > cpu_to_le16(hlen)) {
6904 /* we split the first bd into headers and data bds
 6905 * to ease the pain of our fellow microcode engineers
6906 * we use one mapping for both bds
6907 * So far this has only been observed to happen
6908 * in Other Operating Systems(TM)
6909 */
6910
6911 /* first fix first bd */
6912 nbd++;
6913 tx_bd->nbd = cpu_to_le16(nbd);
6914 tx_bd->nbytes = cpu_to_le16(hlen);
6915
6916 /* we only print this as an error
6917 * because we don't think this will ever happen.
6918 */
6919 BNX2X_ERR("TSO split header size is %d (%x:%x)"
6920 " nbd %d\n", tx_bd->nbytes, tx_bd->addr_hi,
6921 tx_bd->addr_lo, tx_bd->nbd);
6922
6923 /* now get a new data bd
6924 * (after the pbd) and fill it */
6925 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6926 tx_bd = &fp->tx_desc_ring[bd_prod];
6927
6928 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6929 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping) + hlen);
6930 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb) - hlen);
6931 tx_bd->vlan = cpu_to_le16(pkt_prod);
6932 /* this marks the bd
6933 * as one that has no individual mapping
c14423fe 6934 * the FW ignores this flag in a bd not marked start
a2fbb9ea
ET
6935 */
6936 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
6937 DP(NETIF_MSG_TX_QUEUED,
6938 "TSO split data size is %d (%x:%x)\n",
6939 tx_bd->nbytes, tx_bd->addr_hi, tx_bd->addr_lo);
6940 }
6941
6942 if (!pbd) {
6943 /* supposed to be unreached
6944 * (and therefore not handled properly...)
6945 */
6946 BNX2X_ERR("LSO with no PBD\n");
6947 BUG();
6948 }
6949
6950 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
6951 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
6952 pbd->ip_id = swab16(ip_hdr(skb)->id);
6953 pbd->tcp_pseudo_csum =
6954 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
6955 ip_hdr(skb)->daddr,
6956 0, IPPROTO_TCP, 0));
6957 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
6958 }
6959
6960 {
6961 int i;
6962
6963 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
6964 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6965
6966 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6967 tx_bd = &fp->tx_desc_ring[bd_prod];
6968
6969 mapping = pci_map_page(bp->pdev, frag->page,
6970 frag->page_offset,
6971 frag->size, PCI_DMA_TODEVICE);
6972
6973 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
6974 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6975 tx_bd->nbytes = cpu_to_le16(frag->size);
6976 tx_bd->vlan = cpu_to_le16(pkt_prod);
6977 tx_bd->bd_flags.as_bitfield = 0;
6978 DP(NETIF_MSG_TX_QUEUED, "frag %d bd @%p"
6979 " addr (%x:%x) nbytes %d flags %x\n",
6980 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
6981 tx_bd->nbytes, tx_bd->bd_flags.as_bitfield);
6982 } /* for */
6983 }
6984
6985 /* now at last mark the bd as the last bd */
6986 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
6987
6988 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
6989 tx_bd, tx_bd->bd_flags.as_bitfield);
6990
6991 tx_buf->skb = skb;
6992
6993 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
6994
6995 /* now send a tx doorbell, counting the next bd
6996 * if the packet contains or ends with it
6997 */
6998 if (TX_BD_POFF(bd_prod) < nbd)
6999 nbd++;
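/* Editor's note: the TX page presumably ends with a "next page"
 * pointer BD; if the producer's page offset is smaller than nbd,
 * the chain wrapped across one, so it is counted here as well.
 */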
7000
7001 if (pbd)
7002 DP(NETIF_MSG_TX_QUEUED,
7003 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
7004 " tcp_flags %x xsum %x seq %u hlen %u\n",
7005 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
7006 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
7007 pbd->tcp_send_seq, pbd->total_hlen);
7008
7009 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %u bd %d\n", nbd, bd_prod);
7010
96fc1784
ET
7011 fp->hw_tx_prods->bds_prod =
7012 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 7013 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
7014 fp->hw_tx_prods->packets_prod =
7015 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
a2fbb9ea
ET
7016 DOORBELL(bp, fp_index, 0);
7017
7018 mmiowb();
7019
7020 fp->tx_bd_prod = bd_prod;
7021 dev->trans_start = jiffies;
7022
7023 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
7024 netif_stop_queue(dev);
7025 bp->slowpath->eth_stats.driver_xoff++;
7026 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
7027 netif_wake_queue(dev);
7028 }
7029 fp->tx_pkt++;
7030
7031 return NETDEV_TX_OK;
7032}
7033
a2fbb9ea
ET
7034/* Called with rtnl_lock */
7035static int bnx2x_open(struct net_device *dev)
7036{
7037 struct bnx2x *bp = netdev_priv(dev);
7038
7039 bnx2x_set_power_state(bp, PCI_D0);
7040
7041 return bnx2x_nic_load(bp, 1);
7042}
7043
7044/* Called with rtnl_lock */
7045static int bnx2x_close(struct net_device *dev)
7046{
a2fbb9ea
ET
7047 struct bnx2x *bp = netdev_priv(dev);
7048
7049 /* Unload the driver, release IRQs */
228241eb
ET
7050 bnx2x_nic_unload(bp, 1);
7051
7052 if (!CHIP_REV_IS_SLOW(bp))
7053 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
7054
7055 return 0;
7056}
7057
7058/* Called with rtnl_lock */
7059static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
7060{
7061 struct sockaddr *addr = p;
7062 struct bnx2x *bp = netdev_priv(dev);
7063
7064 if (!is_valid_ether_addr(addr->sa_data))
7065 return -EINVAL;
7066
7067 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7068 if (netif_running(dev))
7069 bnx2x_set_mac_addr(bp);
7070
7071 return 0;
7072}
7073
c18487ee 7074/* called with rtnl_lock */
a2fbb9ea
ET
7075static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
7076{
7077 struct mii_ioctl_data *data = if_mii(ifr);
7078 struct bnx2x *bp = netdev_priv(dev);
7079 int err;
7080
7081 switch (cmd) {
7082 case SIOCGMIIPHY:
7083 data->phy_id = bp->phy_addr;
7084
c14423fe 7085 /* fallthrough */
c18487ee 7086
a2fbb9ea 7087 case SIOCGMIIREG: {
c18487ee 7088 u16 mii_regval;
a2fbb9ea 7089
c18487ee
YR
7090 if (!netif_running(dev))
7091 return -EAGAIN;
a2fbb9ea 7092
c18487ee
YR
7093 mutex_lock(&bp->phy_mutex);
7094 err = bnx2x_cl45_read(bp, bp->port, 0, bp->phy_addr,
7095 DEFAULT_PHY_DEV_ADDR,
7096 (data->reg_num & 0x1f), &mii_regval);
7097 data->val_out = mii_regval;
7098 mutex_unlock(&bp->phy_mutex);
a2fbb9ea
ET
7099 return err;
7100 }
7101
7102 case SIOCSMIIREG:
7103 if (!capable(CAP_NET_ADMIN))
7104 return -EPERM;
7105
c18487ee
YR
7106 if (!netif_running(dev))
7107 return -EAGAIN;
7108
7109 mutex_lock(&bp->phy_mutex);
7110 err = bnx2x_cl45_write(bp, bp->port, 0, bp->phy_addr,
7111 DEFAULT_PHY_DEV_ADDR,
7112 (data->reg_num & 0x1f), data->val_in);
7113 mutex_unlock(&bp->phy_mutex);
a2fbb9ea
ET
7114 return err;
7115
7116 default:
7117 /* do nothing */
7118 break;
7119 }
7120
7121 return -EOPNOTSUPP;
7122}
7123
7124/* Called with rtnl_lock */
7125static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
7126{
7127 struct bnx2x *bp = netdev_priv(dev);
7128
7129 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
7130 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
7131 return -EINVAL;
7132
7133 /* This does not race with packet allocation
c14423fe 7134 * because the actual alloc size is
a2fbb9ea
ET
7135 * only updated as part of load
7136 */
7137 dev->mtu = new_mtu;
7138
7139 if (netif_running(dev)) {
7140 bnx2x_nic_unload(bp, 0);
7141 bnx2x_nic_load(bp, 0);
7142 }
7143 return 0;
7144}
7145
7146static void bnx2x_tx_timeout(struct net_device *dev)
7147{
7148 struct bnx2x *bp = netdev_priv(dev);
7149
7150#ifdef BNX2X_STOP_ON_ERROR
7151 if (!bp->panic)
7152 bnx2x_panic();
7153#endif
7154 /* This allows the netif to be shutdown gracefully before resetting */
7155 schedule_work(&bp->reset_task);
7156}
7157
7158#ifdef BCM_VLAN
7159/* Called with rtnl_lock */
7160static void bnx2x_vlan_rx_register(struct net_device *dev,
7161 struct vlan_group *vlgrp)
7162{
7163 struct bnx2x *bp = netdev_priv(dev);
7164
7165 bp->vlgrp = vlgrp;
7166 if (netif_running(dev))
49d66772 7167 bnx2x_set_client_config(bp);
a2fbb9ea
ET
7168}
7169#endif
7170
7171#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7172static void poll_bnx2x(struct net_device *dev)
7173{
7174 struct bnx2x *bp = netdev_priv(dev);
7175
7176 disable_irq(bp->pdev->irq);
7177 bnx2x_interrupt(bp->pdev->irq, dev);
7178 enable_irq(bp->pdev->irq);
7179}
7180#endif
7181
7182static void bnx2x_reset_task(struct work_struct *work)
7183{
7184 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7185
7186#ifdef BNX2X_STOP_ON_ERROR
7187 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7188 " so reset not done to allow debug dump,\n"
7189 KERN_ERR " you will need to reboot when done\n");
7190 return;
7191#endif
7192
7193 if (!netif_running(bp->dev))
7194 return;
7195
228241eb 7196 rtnl_lock();
a2fbb9ea 7197
228241eb
ET
7198 if (bp->state != BNX2X_STATE_OPEN) {
7199 DP(NETIF_MSG_TX_ERR, "state is %x, returning\n", bp->state);
7200 goto reset_task_exit;
7201 }
a2fbb9ea
ET
7202
7203 bnx2x_nic_unload(bp, 0);
7204 bnx2x_nic_load(bp, 0);
7205
228241eb
ET
7206reset_task_exit:
7207 rtnl_unlock();
a2fbb9ea
ET
7208}
7209
7210static int __devinit bnx2x_init_board(struct pci_dev *pdev,
7211 struct net_device *dev)
7212{
7213 struct bnx2x *bp;
7214 int rc;
7215
7216 SET_NETDEV_DEV(dev, &pdev->dev);
7217 bp = netdev_priv(dev);
7218
7219 bp->flags = 0;
7220 bp->port = PCI_FUNC(pdev->devfn);
7221
7222 rc = pci_enable_device(pdev);
7223 if (rc) {
7224 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
7225 goto err_out;
7226 }
7227
7228 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7229 printk(KERN_ERR PFX "Cannot find PCI device base address,"
7230 " aborting\n");
7231 rc = -ENODEV;
7232 goto err_out_disable;
7233 }
7234
7235 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
7236 printk(KERN_ERR PFX "Cannot find second PCI device"
7237 " base address, aborting\n");
7238 rc = -ENODEV;
7239 goto err_out_disable;
7240 }
7241
7242 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7243 if (rc) {
7244 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
7245 " aborting\n");
7246 goto err_out_disable;
7247 }
7248
7249 pci_set_master(pdev);
7250
7251 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7252 if (bp->pm_cap == 0) {
7253 printk(KERN_ERR PFX "Cannot find power management"
7254 " capability, aborting\n");
7255 rc = -EIO;
7256 goto err_out_release;
7257 }
7258
7259 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
7260 if (bp->pcie_cap == 0) {
7261 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
7262 " aborting\n");
7263 rc = -EIO;
7264 goto err_out_release;
7265 }
7266
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->dev = dev;
	bp->pdev = pdev;

	INIT_WORK(&bp->reset_task, bnx2x_reset_task);
	INIT_WORK(&bp->sp_task, bnx2x_sp_task);

	dev->base_addr = pci_resource_start(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = ioremap_nocache(dev->base_addr,
				      pci_resource_len(pdev, 0));
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					pci_resource_len(pdev, 2));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	bnx2x_get_hwinfo(bp);

	if (nomcp) {
		printk(KERN_ERR PFX "MCP disabled, will only"
		       " init first device\n");
		onefunc = 1;
	}

	if (onefunc && bp->port) {
		printk(KERN_ERR PFX "Second device disabled, exiting\n");
		rc = -ENODEV;
		goto err_out_unmap;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->rx_offset = 0;

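	/* default interrupt coalescing: an interrupt fires after 0xff
	 * packets or after 50 (Tx) / 25 (Rx) coalescing ticks, whichever
	 * comes first */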
	bp->tx_quick_cons_trip_int = 0xff;
	bp->tx_quick_cons_trip = 0xff;
	bp->tx_ticks_int = 50;
	bp->tx_ticks = 50;

	bp->rx_quick_cons_trip_int = 0xff;
	bp->rx_quick_cons_trip = 0xff;
	bp->rx_ticks_int = 25;
	bp->rx_ticks = 25;

	bp->stats_ticks = 1000000 & 0xffff00;

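	/* periodic driver timer; if the "poll" module parameter is set,
	 * its value is used as the polling interval in jiffies */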
	bp->timer_interval = HZ;
	bp->current_interval = (poll ? poll : HZ);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}

	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

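/* decode the negotiated PCIe link width from the link control register
 * exposed through the device's PCICFG window */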
static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value: 1 = 2.5GHz, 2 = 5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

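/* probe entry point: allocate the net_device, run the board setup,
 * wire up the net_device callbacks and register with the stack */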
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;
	int port = PCI_FUNC(pdev->devfn);
	DECLARE_MAC_BUF(mac);

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev is zeroed by alloc_etherdev() */
	dev = alloc_etherdev(sizeof(*bp));
	if (!dev)
		return -ENOMEM;

	netif_carrier_off(dev);

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	if (port && onefunc) {
		printk(KERN_ERR PFX "Second function disabled, exiting\n");
		free_netdev(dev);
		return 0;
	}

	rc = bnx2x_init_board(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	dev->hard_start_xmit = bnx2x_start_xmit;
	dev->watchdog_timeo = TX_TIMEOUT;

	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->open = bnx2x_open;
	dev->stop = bnx2x_close;
	dev->set_multicast_list = bnx2x_set_rx_mode;
	dev->set_mac_address = bnx2x_change_mac_addr;
	dev->do_ioctl = bnx2x_ioctl;
	dev->change_mtu = bnx2x_change_mtu;
	dev->tx_timeout = bnx2x_tx_timeout;
#ifdef BCM_VLAN
	dev->vlan_rx_register = bnx2x_vlan_rx_register;
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	dev->poll_controller = poll_bnx2x;
#endif
	dev->features |= NETIF_F_SG;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= NETIF_F_IP_CSUM;
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		if (bp->regview)
			iounmap(bp->regview);
		if (bp->doorbells)
			iounmap(bp->doorbells);
		pci_release_regions(pdev);
		pci_disable_device(pdev);
		pci_set_drvdata(pdev, NULL);
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	bp->name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->name,
	       ((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
	       ((CHIP_ID(bp) & 0x0ff0) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %s\n", print_mac(mac, dev->dev_addr));
	return 0;
}

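/* remove entry point: tear everything down in the reverse order of
 * bnx2x_init_one() */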
static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		/* we get here if init_one() fails */
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}

	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

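/* PM suspend: quiesce the chip, detach the interface and let the PCI
 * core choose the target sleep state */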
static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev)
		return 0;

	if (!netif_running(dev))
		return 0;

	bp = netdev_priv(dev);

	bnx2x_nic_unload(bp, 0);

	netif_device_detach(dev);

	pci_save_state(pdev);
	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	return 0;
}

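/* PM resume: restore PCI state, power the chip back up and redo the
 * NIC load undone by bnx2x_suspend() */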
static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}

	if (!netif_running(dev))
		return 0;

	bp = netdev_priv(dev);

	pci_restore_state(pdev);
	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, 0);
	if (rc)
		return rc;

	return 0;
}

static struct pci_driver bnx2x_pci_driver = {
	.name     = DRV_MODULE_NAME,
	.id_table = bnx2x_pci_tbl,
	.probe    = bnx2x_init_one,
	.remove   = __devexit_p(bnx2x_remove_one),
	.suspend  = bnx2x_suspend,
	.resume   = bnx2x_resume,
};

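/* module glue: registering with the PCI core drives everything else
 * through the probe/remove callbacks above */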
static int __init bnx2x_init(void)
{
	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);