/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

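/*
 * Illustrative usage sketch (not part of the original file): the pair of
 * accessors above tunnels GRC register accesses through a PCI config-space
 * window - PCICFG_GRC_ADDRESS selects the target register, PCICFG_GRC_DATA
 * carries the value - which works before the BAR-mapped REG_RD/REG_WR path
 * is usable.  MISC_REG_CHIP_NUM is assumed here only as a plausible example
 * register offset.
 */
static u32 __maybe_unused bnx2x_example_ind_probe(struct bnx2x *bp)
{
	/* read the chip-number register through the config-space window */
	return bnx2x_reg_rd_ind(bp, MISC_REG_CHIP_NUM);
}
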
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

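/*
 * Illustrative usage sketch (not in the original file): DMA-ing four data
 * words from the slowpath write-back buffer into chip memory and letting
 * the helper above spin on the completion word.  The GRC destination
 * offset 0x2000 is a made-up example value, not a real register.
 */
static void __maybe_unused bnx2x_example_dmae_write(struct bnx2x *bp)
{
	u32 *data = bnx2x_sp(bp, wb_data[0]);

	data[0] = 0x11111111;
	data[1] = 0x22222222;
	data[2] = 0x33333333;
	data[3] = 0x44444444;
	/* blocks until DMAE writes DMAE_COMP_VAL back, or times out */
	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), 0x2000, 4);
}
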
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

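/*
 * Illustrative sketch (not in the original file): 64-bit "wide-bus"
 * registers are written as a hi/lo dword pair through the DMAE helper
 * above.  PXP2_REG_RQ_ONCHIP_AT is assumed here purely as an example of
 * a wide-bus destination, and the 64-bit value is arbitrary.
 */
static void __maybe_unused bnx2x_example_wb_write(struct bnx2x *bp)
{
	u64 val = 0x0000000100000000ULL;

	/* high dword first, low dword second, matching bnx2x_wb_wr() */
	bnx2x_wb_wr(bp, PXP2_REG_RQ_ONCHIP_AT, U64_HI(val), U64_LO(val));
}
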
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

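/*
 * Illustrative sketch (not in the original file): callers use the bit mask
 * returned above to see whether new completions arrived since the last
 * pass - bit 0 tracks the CSTORM (tx) status-block index, bit 1 the
 * USTORM (rx) one - before deciding whether to re-poll or re-arm.
 */
static int __maybe_unused bnx2x_example_sb_changed(struct bnx2x_fastpath *fp)
{
	u16 changed = bnx2x_update_fpsb_idx(fp);

	return changed != 0;	/* non-zero: poll the tx/rx rings again */
}
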
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

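/*
 * Worked example (illustrative, assuming a hypothetical tx_ring_size of
 * 1000 and NUM_TX_RINGS == 4): with prod = 100 and cons = 80,
 * used = (100 - 80) + 4 = 24, so 1000 - 24 = 976 BDs are still available.
 * The NUM_TX_RINGS bias reserves the "next page" link descriptors so they
 * are never handed out as data BDs.
 */
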
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

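/*
 * Illustrative sketch (not in the original file): at ring-init time the
 * driver fills SGE slots with freshly mapped pages in the same way,
 * stopping at the first allocation failure.  "count" is a hypothetical
 * fill target, not a driver constant.
 */
static int __maybe_unused bnx2x_example_fill_sges(struct bnx2x *bp,
						  struct bnx2x_fastpath *fp,
						  int count)
{
	int i;

	for (i = 0; i < count; i++)
		if (bnx2x_alloc_rx_sge(bp, fp, i) < 0)
			break;	/* run with what was allocated so far */
	return i;
}
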
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the last two indices in each page:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

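/*
 * Worked example (illustrative, assuming a hypothetical 512-entry SGE
 * ring and 64-bit mask elements, i.e. RX_SGE_MASK_ELEM_SHIFT == 6): the
 * memset above covers 512 >> 6 = 8 u64 words; clearing the two "next
 * page" bits per ring page then removes the link elements from the
 * producer arithmetic in bnx2x_update_sge_prod().
 */
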
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

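/*
 * Worked example (illustrative, assuming 4K SGE pages and
 * PAGES_PER_SGE == 1): an aggregated packet with pkt_len = 9014 and
 * len_on_bd = 1514 leaves frag_size = 7500, so pages = 2; the loop above
 * then attaches one 4096-byte frag and one 3404-byte frag, and gso_size
 * is clamped to min(4096, max(7500, 1514)) = 4096.
 */
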
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

4a37fb66 1738static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1739{
1740 u32 lock_status;
1741 u32 resource_bit = (1 << resource);
4a37fb66
YG
1742 int func = BP_FUNC(bp);
1743 u32 hw_lock_control_reg;
c18487ee 1744 int cnt;
a2fbb9ea 1745
c18487ee
YR
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 DP(NETIF_MSG_HW,
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 return -EINVAL;
1752 }
a2fbb9ea 1753
4a37fb66
YG
1754 if (func <= 5) {
1755 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 } else {
1757 hw_lock_control_reg =
1758 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1759 }
1760
c18487ee 1761 /* Validating that the resource is not already taken */
4a37fb66 1762 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1763 if (lock_status & resource_bit) {
1764 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1765 lock_status, resource_bit);
1766 return -EEXIST;
1767 }
a2fbb9ea 1768
46230476
EG
1769 /* Try for 5 second every 5ms */
1770 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1771 /* Try to acquire the lock */
4a37fb66
YG
1772 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1774 if (lock_status & resource_bit)
1775 return 0;
a2fbb9ea 1776
c18487ee 1777 msleep(5);
a2fbb9ea 1778 }
c18487ee
YR
1779 DP(NETIF_MSG_HW, "Timeout\n");
1780 return -EAGAIN;
1781}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
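
/*
 * Typical pairing of the HW lock helpers above (illustrative sketch,
 * not taken verbatim from this file; bnx2x_set_gpio() below follows
 * the same pattern):
 *
 *	if (!bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO)) {
 *		... touch the shared GPIO registers ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */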

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
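
/*
 * Map the pause mode that autoneg actually resolved (link_vars.ieee_fc)
 * onto the ethtool ADVERTISED_Pause/ADVERTISED_Asym_Pause bits kept in
 * bp->port.advertising.
 */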
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
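
/*
 * Note: bp->link_vars.line_speed is in Mbps, i.e. bits per microsecond,
 * so r_param = line_speed / 8 below is the line rate in bytes per
 * microsecond.
 */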
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}
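
/*
 * The LINK_SYNC attention raised above is what prompts the other
 * functions on the same port to refresh their view of the link: it is
 * handled in bnx2x_attn_int_deasserted3(), which calls this helper.
 */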
static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}
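
/*
 * Returns a bitmask of the default status block indices that advanced
 * since the last poll: bit 0 - attention bits, bit 1 - CSTORM,
 * bit 2 - USTORM, bit 3 - XSTORM, bit 4 - TSTORM.
 */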
static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
		case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}
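
/*
 * An attention is newly asserted when the chip reports it in attn_bits
 * but it is not yet acknowledged (attn_ack) or tracked in the driver's
 * attn_state; it is deasserted once it clears in attn_bits while still
 * acknowledged and tracked.
 */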
static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted = attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits & attn_ack & attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)
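
/*
 * Example: with s = 0x00000000_ffffffff and a = 0x00000000_00000001,
 * s_lo wraps to 0, the (s_lo < a_lo) test detects the carry, and the
 * result is s = 0x00000001_00000000.
 */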

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
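
/*
 * Example: m = 0x00000001_00000000 minus s = 0x00000000_00000001 takes
 * the underflow branch, borrows 1 from d_hi and yields
 * d = 0x00000000_ffffffff.  A negative difference saturates to zero.
 */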

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}

/*
 * Init service functions
 */
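
/*
 * The statistics query is posted as a ramrod on the slow path queue;
 * since it completes on its own dedicated SPQ slot, spq_left is
 * incremented back right after a successful post (see below).
 */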
static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
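
/*
 * Completion handshake: the posting side clears *stats_comp and the
 * last command in each DMAE chain writes DMAE_COMP_VAL there when it
 * finishes; bnx2x_stats_comp() below polls for that marker, sleeping
 * 1 ms per try for at most 10 tries.
 */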
static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}
3435static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3436{
3437 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3438 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3439 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3440 struct regpair diff;
3441
3442 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3443 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3444 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3445 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3446 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3447 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3448 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3449 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3450 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3451 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3452 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3453 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3454 UPDATE_STAT64(tx_stat_gt127,
3455 tx_stat_etherstatspkts65octetsto127octets);
3456 UPDATE_STAT64(tx_stat_gt255,
3457 tx_stat_etherstatspkts128octetsto255octets);
3458 UPDATE_STAT64(tx_stat_gt511,
3459 tx_stat_etherstatspkts256octetsto511octets);
3460 UPDATE_STAT64(tx_stat_gt1023,
3461 tx_stat_etherstatspkts512octetsto1023octets);
3462 UPDATE_STAT64(tx_stat_gt1518,
3463 tx_stat_etherstatspkts1024octetsto1522octets);
3464 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3465 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3466 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3467 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3468 UPDATE_STAT64(tx_stat_gterr,
3469 tx_stat_dot3statsinternalmactransmiterrors);
3470 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3471
3472 estats->pause_frames_received_hi =
3473 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3474 estats->pause_frames_received_lo =
3475 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3476
3477 estats->pause_frames_sent_hi =
3478 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3479 estats->pause_frames_sent_lo =
3480 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3481}
3482
3483static void bnx2x_emac_stats_update(struct bnx2x *bp)
3484{
3485 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3486 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3487 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3488
3489 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3490 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3491 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3492 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3493 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3494 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3495 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3496 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3497 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3498 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3499 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3500 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3501 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3502 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3503 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3504 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3505 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3506 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3507 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3508 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3509 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3510 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3511 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3512 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3513 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3514 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3515 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3516 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3517 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3518 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3519 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3520
3521 estats->pause_frames_received_hi =
3522 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3523 estats->pause_frames_received_lo =
3524 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3525 ADD_64(estats->pause_frames_received_hi,
3526 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3527 estats->pause_frames_received_lo,
3528 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3529
3530 estats->pause_frames_sent_hi =
3531 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3532 estats->pause_frames_sent_lo =
3533 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3534 ADD_64(estats->pause_frames_sent_hi,
3535 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3536 estats->pause_frames_sent_lo,
3537 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3538}
3539
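/* Refresh port statistics from whichever MAC is currently active (BMAC
 * or EMAC), extend the 32-bit BRB discard/truncate counters into 64 bits,
 * and mirror the MAC statistics block into the ethernet stats.  A changed
 * NIG timer max value read from shared memory is reported as an error. */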
3540static int bnx2x_hw_stats_update(struct bnx2x *bp)
3541{
3542 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3543 struct nig_stats *old = &(bp->port.old_nig_stats);
3544 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3545 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3546 struct regpair diff;
de832a55 3547 u32 nig_timer_max;
3548
3549 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3550 bnx2x_bmac_stats_update(bp);
3551
3552 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3553 bnx2x_emac_stats_update(bp);
3554
3555 else { /* unreached */
3556 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3557 return -1;
3558 }
a2fbb9ea 3559
3560 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3561 new->brb_discard - old->brb_discard);
3562 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3563 new->brb_truncate - old->brb_truncate);
a2fbb9ea 3564
3565 UPDATE_STAT64_NIG(egress_mac_pkt0,
3566 etherstatspkts1024octetsto1522octets);
3567 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
a2fbb9ea 3568
bb2a0f7a 3569 memcpy(old, new, sizeof(struct nig_stats));
a2fbb9ea 3570
3571 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3572 sizeof(struct mac_stx));
3573 estats->brb_drop_hi = pstats->brb_drop_hi;
3574 estats->brb_drop_lo = pstats->brb_drop_lo;
a2fbb9ea 3575
bb2a0f7a 3576 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
a2fbb9ea 3577
3578 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3579 if (nig_timer_max != estats->nig_timer_max) {
3580 estats->nig_timer_max = nig_timer_max;
3581 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3582 }
3583
bb2a0f7a 3584 return 0;
3585}
3586
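/* Fold the per-client statistics produced by the x/t/u storm firmware
 * into the per-queue, per-function and global ethernet counters.  Each
 * storm's counter must match the driver's stats_counter; otherwise the
 * snapshot is stale and a negative value is returned so the caller can
 * treat it as "not updated yet". */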
bb2a0f7a 3587static int bnx2x_storm_stats_update(struct bnx2x *bp)
3588{
3589 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
bb2a0f7a 3590 struct tstorm_per_port_stats *tport =
de832a55 3591 &stats->tstorm_common.port_statistics;
3592 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3593 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3594 int i;
3595
3596 memset(&(fstats->total_bytes_received_hi), 0,
3597 sizeof(struct host_func_stats) - 2*sizeof(u32));
3598 estats->error_bytes_received_hi = 0;
3599 estats->error_bytes_received_lo = 0;
3600 estats->etherstatsoverrsizepkts_hi = 0;
3601 estats->etherstatsoverrsizepkts_lo = 0;
3602 estats->no_buff_discard_hi = 0;
3603 estats->no_buff_discard_lo = 0;
a2fbb9ea 3604
3605 for_each_queue(bp, i) {
3606 struct bnx2x_fastpath *fp = &bp->fp[i];
3607 int cl_id = fp->cl_id;
3608 struct tstorm_per_client_stats *tclient =
3609 &stats->tstorm_common.client_statistics[cl_id];
3610 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3611 struct ustorm_per_client_stats *uclient =
3612 &stats->ustorm_common.client_statistics[cl_id];
3613 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3614 struct xstorm_per_client_stats *xclient =
3615 &stats->xstorm_common.client_statistics[cl_id];
3616 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3617 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3618 u32 diff;
3619
3620 /* are storm stats valid? */
3621 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
bb2a0f7a 3622 bp->stats_counter) {
3623 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3624 " xstorm counter (%d) != stats_counter (%d)\n",
3625 i, xclient->stats_counter, bp->stats_counter);
3626 return -1;
3627 }
3628 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
bb2a0f7a 3629 bp->stats_counter) {
3630 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3631 " tstorm counter (%d) != stats_counter (%d)\n",
3632 i, tclient->stats_counter, bp->stats_counter);
3633 return -2;
3634 }
3635 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3636 bp->stats_counter) {
3637 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3638 " ustorm counter (%d) != stats_counter (%d)\n",
3639 i, uclient->stats_counter, bp->stats_counter);
3640 return -4;
3641 }
a2fbb9ea 3642
3643 qstats->total_bytes_received_hi =
3644 qstats->valid_bytes_received_hi =
a2fbb9ea 3645 le32_to_cpu(tclient->total_rcv_bytes.hi);
3646 qstats->total_bytes_received_lo =
3647 qstats->valid_bytes_received_lo =
a2fbb9ea 3648 le32_to_cpu(tclient->total_rcv_bytes.lo);
bb2a0f7a 3649
de832a55 3650 qstats->error_bytes_received_hi =
bb2a0f7a 3651 le32_to_cpu(tclient->rcv_error_bytes.hi);
de832a55 3652 qstats->error_bytes_received_lo =
bb2a0f7a 3653 le32_to_cpu(tclient->rcv_error_bytes.lo);
bb2a0f7a 3654
3655 ADD_64(qstats->total_bytes_received_hi,
3656 qstats->error_bytes_received_hi,
3657 qstats->total_bytes_received_lo,
3658 qstats->error_bytes_received_lo);
3659
3660 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3661 total_unicast_packets_received);
3662 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3663 total_multicast_packets_received);
3664 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3665 total_broadcast_packets_received);
3666 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3667 etherstatsoverrsizepkts);
3668 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3669
3670 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3671 total_unicast_packets_received);
3672 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3673 total_multicast_packets_received);
3674 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3675 total_broadcast_packets_received);
3676 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3677 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3678 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3679
3680 qstats->total_bytes_transmitted_hi =
bb2a0f7a 3681 le32_to_cpu(xclient->total_sent_bytes.hi);
de832a55 3682 qstats->total_bytes_transmitted_lo =
3683 le32_to_cpu(xclient->total_sent_bytes.lo);
3684
3685 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3686 total_unicast_packets_transmitted);
3687 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3688 total_multicast_packets_transmitted);
3689 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3690 total_broadcast_packets_transmitted);
3691
3692 old_tclient->checksum_discard = tclient->checksum_discard;
3693 old_tclient->ttl0_discard = tclient->ttl0_discard;
3694
3695 ADD_64(fstats->total_bytes_received_hi,
3696 qstats->total_bytes_received_hi,
3697 fstats->total_bytes_received_lo,
3698 qstats->total_bytes_received_lo);
3699 ADD_64(fstats->total_bytes_transmitted_hi,
3700 qstats->total_bytes_transmitted_hi,
3701 fstats->total_bytes_transmitted_lo,
3702 qstats->total_bytes_transmitted_lo);
3703 ADD_64(fstats->total_unicast_packets_received_hi,
3704 qstats->total_unicast_packets_received_hi,
3705 fstats->total_unicast_packets_received_lo,
3706 qstats->total_unicast_packets_received_lo);
3707 ADD_64(fstats->total_multicast_packets_received_hi,
3708 qstats->total_multicast_packets_received_hi,
3709 fstats->total_multicast_packets_received_lo,
3710 qstats->total_multicast_packets_received_lo);
3711 ADD_64(fstats->total_broadcast_packets_received_hi,
3712 qstats->total_broadcast_packets_received_hi,
3713 fstats->total_broadcast_packets_received_lo,
3714 qstats->total_broadcast_packets_received_lo);
3715 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3716 qstats->total_unicast_packets_transmitted_hi,
3717 fstats->total_unicast_packets_transmitted_lo,
3718 qstats->total_unicast_packets_transmitted_lo);
3719 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3720 qstats->total_multicast_packets_transmitted_hi,
3721 fstats->total_multicast_packets_transmitted_lo,
3722 qstats->total_multicast_packets_transmitted_lo);
3723 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3724 qstats->total_broadcast_packets_transmitted_hi,
3725 fstats->total_broadcast_packets_transmitted_lo,
3726 qstats->total_broadcast_packets_transmitted_lo);
3727 ADD_64(fstats->valid_bytes_received_hi,
3728 qstats->valid_bytes_received_hi,
3729 fstats->valid_bytes_received_lo,
3730 qstats->valid_bytes_received_lo);
3731
3732 ADD_64(estats->error_bytes_received_hi,
3733 qstats->error_bytes_received_hi,
3734 estats->error_bytes_received_lo,
3735 qstats->error_bytes_received_lo);
3736 ADD_64(estats->etherstatsoverrsizepkts_hi,
3737 qstats->etherstatsoverrsizepkts_hi,
3738 estats->etherstatsoverrsizepkts_lo,
3739 qstats->etherstatsoverrsizepkts_lo);
3740 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3741 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3742 }
3743
3744 ADD_64(fstats->total_bytes_received_hi,
3745 estats->rx_stat_ifhcinbadoctets_hi,
3746 fstats->total_bytes_received_lo,
3747 estats->rx_stat_ifhcinbadoctets_lo);
3748
3749 memcpy(estats, &(fstats->total_bytes_received_hi),
3750 sizeof(struct host_func_stats) - 2*sizeof(u32));
3751
3752 ADD_64(estats->etherstatsoverrsizepkts_hi,
3753 estats->rx_stat_dot3statsframestoolong_hi,
3754 estats->etherstatsoverrsizepkts_lo,
3755 estats->rx_stat_dot3statsframestoolong_lo);
3756 ADD_64(estats->error_bytes_received_hi,
3757 estats->rx_stat_ifhcinbadoctets_hi,
3758 estats->error_bytes_received_lo,
3759 estats->rx_stat_ifhcinbadoctets_lo);
3760
3761 if (bp->port.pmf) {
3762 estats->mac_filter_discard =
3763 le32_to_cpu(tport->mac_filter_discard);
3764 estats->xxoverflow_discard =
3765 le32_to_cpu(tport->xxoverflow_discard);
3766 estats->brb_truncate_discard =
bb2a0f7a 3767 le32_to_cpu(tport->brb_truncate_discard);
3768 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3769 }
3770
3771 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
a2fbb9ea 3772
3773 bp->stats_pending = 0;
3774
3775 return 0;
3776}
3777
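/* Map the accumulated driver statistics onto the kernel's
 * net_device_stats: rx/tx packet and byte totals plus derived error
 * counters (length, over, CRC, frame, FIFO and missed errors). */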
bb2a0f7a 3778static void bnx2x_net_stats_update(struct bnx2x *bp)
a2fbb9ea 3779{
bb2a0f7a 3780 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3781 struct net_device_stats *nstats = &bp->dev->stats;
de832a55 3782 int i;
3783
3784 nstats->rx_packets =
3785 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3786 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3787 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3788
3789 nstats->tx_packets =
3790 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3791 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3792 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3793
de832a55 3794 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
a2fbb9ea 3795
0e39e645 3796 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
a2fbb9ea 3797
3798 nstats->rx_dropped = estats->mac_discard;
3799 for_each_queue(bp, i)
3800 nstats->rx_dropped +=
3801 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3802
3803 nstats->tx_dropped = 0;
3804
3805 nstats->multicast =
de832a55 3806 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
a2fbb9ea 3807
bb2a0f7a 3808 nstats->collisions =
de832a55 3809 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3810
3811 nstats->rx_length_errors =
3812 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3813 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3814 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3815 bnx2x_hilo(&estats->brb_truncate_hi);
3816 nstats->rx_crc_errors =
3817 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3818 nstats->rx_frame_errors =
3819 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3820 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3821 nstats->rx_missed_errors = estats->xxoverflow_discard;
3822
3823 nstats->rx_errors = nstats->rx_length_errors +
3824 nstats->rx_over_errors +
3825 nstats->rx_crc_errors +
3826 nstats->rx_frame_errors +
3827 nstats->rx_fifo_errors +
3828 nstats->rx_missed_errors;
a2fbb9ea 3829
bb2a0f7a 3830 nstats->tx_aborted_errors =
3831 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3832 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3833 nstats->tx_carrier_errors =
3834 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3835 nstats->tx_fifo_errors = 0;
3836 nstats->tx_heartbeat_errors = 0;
3837 nstats->tx_window_errors = 0;
3838
3839 nstats->tx_errors = nstats->tx_aborted_errors +
3840 nstats->tx_carrier_errors +
3841 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3842}
3843
3844static void bnx2x_drv_stats_update(struct bnx2x *bp)
3845{
3846 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3847 int i;
3848
3849 estats->driver_xoff = 0;
3850 estats->rx_err_discard_pkt = 0;
3851 estats->rx_skb_alloc_failed = 0;
3852 estats->hw_csum_err = 0;
3853 for_each_queue(bp, i) {
3854 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3855
3856 estats->driver_xoff += qstats->driver_xoff;
3857 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3858 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3859 estats->hw_csum_err += qstats->hw_csum_err;
3860 }
3861}
3862
bb2a0f7a 3863static void bnx2x_stats_update(struct bnx2x *bp)
a2fbb9ea 3864{
bb2a0f7a 3865 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3866
3867 if (*stats_comp != DMAE_COMP_VAL)
3868 return;
3869
3870 if (bp->port.pmf)
de832a55 3871 bnx2x_hw_stats_update(bp);
a2fbb9ea 3872
3873 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3874 BNX2X_ERR("storm stats were not updated for 3 consecutive polls\n");
3875 bnx2x_panic();
3876 return;
3877 }
3878
3879 bnx2x_net_stats_update(bp);
3880 bnx2x_drv_stats_update(bp);
3881
a2fbb9ea 3882 if (bp->msglevel & NETIF_MSG_TIMER) {
3883 struct tstorm_per_client_stats *old_tclient =
3884 &bp->fp->old_tclient;
3885 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
bb2a0f7a 3886 struct bnx2x_eth_stats *estats = &bp->eth_stats;
a2fbb9ea 3887 struct net_device_stats *nstats = &bp->dev->stats;
34f80b04 3888 int i;
3889
3890 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3891 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3892 " tx pkt (%lx)\n",
3893 bnx2x_tx_avail(bp->fp),
7a9b2557 3894 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3895 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3896 " rx pkt (%lx)\n",
3897 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3898 bp->fp->rx_comp_cons),
3899 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3900 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3901 "brb truncate %u\n",
3902 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3903 qstats->driver_xoff,
3904 estats->brb_drop_lo, estats->brb_truncate_lo);
a2fbb9ea 3905 printk(KERN_DEBUG "tstats: checksum_discard %u "
de832a55 3906 "packets_too_big_discard %lu no_buff_discard %lu "
3907 "mac_discard %u mac_filter_discard %u "
3908 "xxovrflow_discard %u brb_truncate_discard %u "
3909 "ttl0_discard %u\n",
bb2a0f7a 3910 old_tclient->checksum_discard,
3911 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3912 bnx2x_hilo(&qstats->no_buff_discard_hi),
3913 estats->mac_discard, estats->mac_filter_discard,
3914 estats->xxoverflow_discard, estats->brb_truncate_discard,
bb2a0f7a 3915 old_tclient->ttl0_discard);
3916
3917 for_each_queue(bp, i) {
3918 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3919 bnx2x_fp(bp, i, tx_pkt),
3920 bnx2x_fp(bp, i, rx_pkt),
3921 bnx2x_fp(bp, i, rx_calls));
3922 }
3923 }
3924
3925 bnx2x_hw_stats_post(bp);
3926 bnx2x_storm_stats_post(bp);
3927}
a2fbb9ea 3928
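/* Final statistics flush on stop: DMA the port and (if present) function
 * statistics blocks from host memory back to their device-side areas.
 * When both are sent, the port command completes through the DMAE loader
 * so that only the last command writes stats_comp. */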
3929static void bnx2x_port_stats_stop(struct bnx2x *bp)
3930{
3931 struct dmae_command *dmae;
3932 u32 opcode;
3933 int loader_idx = PMF_DMAE_C(bp);
3934 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3935
bb2a0f7a 3936 bp->executer_idx = 0;
a2fbb9ea 3937
3938 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3939 DMAE_CMD_C_ENABLE |
3940 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3941#ifdef __BIG_ENDIAN
bb2a0f7a 3942 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3943#else
bb2a0f7a 3944 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3945#endif
3946 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3947 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3948
3949 if (bp->port.port_stx) {
3950
3951 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3952 if (bp->func_stx)
3953 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3954 else
3955 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3956 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3957 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3958 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3959 dmae->dst_addr_hi = 0;
3960 dmae->len = sizeof(struct host_port_stats) >> 2;
3961 if (bp->func_stx) {
3962 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3963 dmae->comp_addr_hi = 0;
3964 dmae->comp_val = 1;
3965 } else {
3966 dmae->comp_addr_lo =
3967 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3968 dmae->comp_addr_hi =
3969 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3970 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3971
3972 *stats_comp = 0;
3973 }
3974 }
3975
3976 if (bp->func_stx) {
3977
3978 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3979 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3980 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3981 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3982 dmae->dst_addr_lo = bp->func_stx >> 2;
3983 dmae->dst_addr_hi = 0;
3984 dmae->len = sizeof(struct host_func_stats) >> 2;
3985 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3986 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3987 dmae->comp_val = DMAE_COMP_VAL;
3988
3989 *stats_comp = 0;
a2fbb9ea 3990 }
3991}
3992
3993static void bnx2x_stats_stop(struct bnx2x *bp)
3994{
3995 int update = 0;
3996
3997 bnx2x_stats_comp(bp);
3998
3999 if (bp->port.pmf)
4000 update = (bnx2x_hw_stats_update(bp) == 0);
4001
4002 update |= (bnx2x_storm_stats_update(bp) == 0);
4003
4004 if (update) {
4005 bnx2x_net_stats_update(bp);
a2fbb9ea 4006
4007 if (bp->port.pmf)
4008 bnx2x_port_stats_stop(bp);
4009
4010 bnx2x_hw_stats_post(bp);
4011 bnx2x_stats_comp(bp);
4012 }
4013}
4014
4015static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4016{
4017}
4018
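/* Statistics state machine: two states (DISABLED, ENABLED) by four
 * events (PMF, LINK_UP, UPDATE, STOP).  Each cell gives the action to
 * run and the next state; bnx2x_stats_handle() below drives it. */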
4019static const struct {
4020 void (*action)(struct bnx2x *bp);
4021 enum bnx2x_stats_state next_state;
4022} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4023/* state event */
4024{
4025/* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4026/* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4027/* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4028/* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4029},
4030{
4031/* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4032/* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4033/* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4034/* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4035}
4036};
4037
4038static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4039{
4040 enum bnx2x_stats_state state = bp->stats_state;
4041
4042 bnx2x_stats_stm[state][event].action(bp);
4043 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4044
4045 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4046 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4047 state, event, bp->stats_state);
4048}
4049
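/* Periodic driver timer: in poll mode it services the first queue's TX
 * and RX rings directly, maintains the driver/MCP heartbeat (drv_pulse
 * vs. mcp_pulse) and triggers a statistics update while the device is
 * up, then re-arms itself every bp->current_interval jiffies. */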
4050static void bnx2x_timer(unsigned long data)
4051{
4052 struct bnx2x *bp = (struct bnx2x *) data;
4053
4054 if (!netif_running(bp->dev))
4055 return;
4056
4057 if (atomic_read(&bp->intr_sem) != 0)
f1410647 4058 goto timer_restart;
4059
4060 if (poll) {
4061 struct bnx2x_fastpath *fp = &bp->fp[0];
4062 int rc;
4063
4064 bnx2x_tx_int(fp, 1000);
4065 rc = bnx2x_rx_int(fp, 1000);
4066 }
4067
4068 if (!BP_NOMCP(bp)) {
4069 int func = BP_FUNC(bp);
4070 u32 drv_pulse;
4071 u32 mcp_pulse;
4072
4073 ++bp->fw_drv_pulse_wr_seq;
4074 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4075 /* TBD - add SYSTEM_TIME */
4076 drv_pulse = bp->fw_drv_pulse_wr_seq;
34f80b04 4077 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
a2fbb9ea 4078
34f80b04 4079 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4080 MCP_PULSE_SEQ_MASK);
4081 /* The delta between driver pulse and mcp response
4082 * should be 1 (before mcp response) or 0 (after mcp response)
4083 */
4084 if ((drv_pulse != mcp_pulse) &&
4085 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4086 /* someone lost a heartbeat... */
4087 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4088 drv_pulse, mcp_pulse);
4089 }
4090 }
4091
4092 if ((bp->state == BNX2X_STATE_OPEN) ||
4093 (bp->state == BNX2X_STATE_DISABLED))
4094 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
a2fbb9ea 4095
f1410647 4096timer_restart:
4097 mod_timer(&bp->timer, jiffies + bp->current_interval);
4098}
4099
4100/* end of Statistics */
4101
4102/* nic init */
4103
4104/*
4105 * nic init service functions
4106 */
4107
34f80b04 4108static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
a2fbb9ea 4109{
4110 int port = BP_PORT(bp);
4111
4112 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4113 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4114 sizeof(struct ustorm_status_block)/4);
4115 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4116 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
35302989 4117 sizeof(struct cstorm_status_block)/4);
4118}
4119
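/* Point the USTORM and CSTORM halves of a fastpath status block at its
 * DMA address, bind it to this function, write 1 to every
 * host-coalescing disable entry, and enable IGU interrupts for the
 * block. */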
4120static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4121 dma_addr_t mapping, int sb_id)
4122{
4123 int port = BP_PORT(bp);
bb2a0f7a 4124 int func = BP_FUNC(bp);
a2fbb9ea 4125 int index;
34f80b04 4126 u64 section;
4127
4128 /* USTORM */
4129 section = ((u64)mapping) + offsetof(struct host_status_block,
4130 u_status_block);
34f80b04 4131 sb->u_status_block.status_block_id = sb_id;
4132
4133 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4134 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4135 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4136 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4137 U64_HI(section));
4138 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4139 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4140
4141 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4142 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4143 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4144
4145 /* CSTORM */
4146 section = ((u64)mapping) + offsetof(struct host_status_block,
4147 c_status_block);
34f80b04 4148 sb->c_status_block.status_block_id = sb_id;
4149
4150 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4151 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
a2fbb9ea 4152 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4153 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
a2fbb9ea 4154 U64_HI(section));
4155 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4156 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4157
4158 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4159 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4160 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4161
4162 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4163}
4164
4165static void bnx2x_zero_def_sb(struct bnx2x *bp)
4166{
4167 int func = BP_FUNC(bp);
a2fbb9ea 4168
4169 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4170 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4171 sizeof(struct ustorm_def_status_block)/4);
4172 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4173 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4174 sizeof(struct cstorm_def_status_block)/4);
4175 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4176 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4177 sizeof(struct xstorm_def_status_block)/4);
4178 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4179 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4180 sizeof(struct tstorm_def_status_block)/4);
4181}
4182
4183static void bnx2x_init_def_sb(struct bnx2x *bp,
4184 struct host_def_status_block *def_sb,
34f80b04 4185 dma_addr_t mapping, int sb_id)
a2fbb9ea 4186{
4187 int port = BP_PORT(bp);
4188 int func = BP_FUNC(bp);
4189 int index, val, reg_offset;
4190 u64 section;
4191
4192 /* ATTN */
4193 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4194 atten_status_block);
34f80b04 4195 def_sb->atten_status_block.status_block_id = sb_id;
a2fbb9ea 4196
4197 bp->attn_state = 0;
4198
4199 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4200 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4201
34f80b04 4202 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4203 bp->attn_group[index].sig[0] = REG_RD(bp,
4204 reg_offset + 0x10*index);
4205 bp->attn_group[index].sig[1] = REG_RD(bp,
4206 reg_offset + 0x4 + 0x10*index);
4207 bp->attn_group[index].sig[2] = REG_RD(bp,
4208 reg_offset + 0x8 + 0x10*index);
4209 bp->attn_group[index].sig[3] = REG_RD(bp,
4210 reg_offset + 0xc + 0x10*index);
4211 }
4212
4213 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4214 HC_REG_ATTN_MSG0_ADDR_L);
4215
4216 REG_WR(bp, reg_offset, U64_LO(section));
4217 REG_WR(bp, reg_offset + 4, U64_HI(section));
4218
4219 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4220
4221 val = REG_RD(bp, reg_offset);
34f80b04 4222 val |= sb_id;
4223 REG_WR(bp, reg_offset, val);
4224
4225 /* USTORM */
4226 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4227 u_def_status_block);
34f80b04 4228 def_sb->u_def_status_block.status_block_id = sb_id;
4229
4230 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4231 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4232 REG_WR(bp, BAR_USTRORM_INTMEM +
34f80b04 4233 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4234 U64_HI(section));
5c862848 4235 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
34f80b04 4236 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4237
4238 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4239 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4240 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4241
4242 /* CSTORM */
4243 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4244 c_def_status_block);
34f80b04 4245 def_sb->c_def_status_block.status_block_id = sb_id;
4246
4247 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4248 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4249 REG_WR(bp, BAR_CSTRORM_INTMEM +
34f80b04 4250 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4251 U64_HI(section));
5c862848 4252 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
34f80b04 4253 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4254
4255 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4256 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4257 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4258
4259 /* TSTORM */
4260 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4261 t_def_status_block);
34f80b04 4262 def_sb->t_def_status_block.status_block_id = sb_id;
4263
4264 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4265 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4266 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4267 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4268 U64_HI(section));
5c862848 4269 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
34f80b04 4270 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4271
4272 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4273 REG_WR16(bp, BAR_TSTRORM_INTMEM +
34f80b04 4274 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4275
4276 /* XSTORM */
4277 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4278 x_def_status_block);
34f80b04 4279 def_sb->x_def_status_block.status_block_id = sb_id;
4280
4281 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4282 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
a2fbb9ea 4283 REG_WR(bp, BAR_XSTRORM_INTMEM +
34f80b04 4284 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
a2fbb9ea 4285 U64_HI(section));
5c862848 4286 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
34f80b04 4287 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4288
4289 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4290 REG_WR16(bp, BAR_XSTRORM_INTMEM +
34f80b04 4291 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
49d66772 4292
bb2a0f7a 4293 bp->stats_pending = 0;
66e855f3 4294 bp->set_mac_pending = 0;
bb2a0f7a 4295
34f80b04 4296 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4297}
4298
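/* Program the RX and TX completion-queue coalescing timeouts for every
 * queue.  The rx_ticks/tx_ticks values are divided by 12, which suggests
 * the HC timeout registers count in 12-usec units (an inference from the
 * scaling here, not stated in this file); a tick value of 0 disables
 * coalescing on that index. */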
4299static void bnx2x_update_coalesce(struct bnx2x *bp)
4300{
34f80b04 4301 int port = BP_PORT(bp);
4302 int i;
4303
4304 for_each_queue(bp, i) {
34f80b04 4305 int sb_id = bp->fp[i].sb_id;
4306
4307 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4308 REG_WR8(bp, BAR_USTRORM_INTMEM +
34f80b04 4309 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4310 U_SB_ETH_RX_CQ_INDEX),
34f80b04 4311 bp->rx_ticks/12);
a2fbb9ea 4312 REG_WR16(bp, BAR_USTRORM_INTMEM +
34f80b04 4313 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4314 U_SB_ETH_RX_CQ_INDEX),
4315 bp->rx_ticks ? 0 : 1);
4316
4317 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4318 REG_WR8(bp, BAR_CSTRORM_INTMEM +
34f80b04 4319 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
5c862848 4320 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4321 bp->tx_ticks/12);
a2fbb9ea 4322 REG_WR16(bp, BAR_CSTRORM_INTMEM +
34f80b04 4323 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
5c862848 4324 C_SB_ETH_TX_CQ_INDEX),
34f80b04 4325 bp->tx_ticks ? 0 : 1);
4326 }
4327}
4328
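/* Release the first 'last' TPA aggregation bins of a fastpath.  An skb
 * still in the START state has its buffer DMA-mapped, so unmap it before
 * freeing. */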
4329static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4330 struct bnx2x_fastpath *fp, int last)
4331{
4332 int i;
4333
4334 for (i = 0; i < last; i++) {
4335 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4336 struct sk_buff *skb = rx_buf->skb;
4337
4338 if (skb == NULL) {
4339 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4340 continue;
4341 }
4342
4343 if (fp->tpa_state[i] == BNX2X_TPA_START)
4344 pci_unmap_single(bp->pdev,
4345 pci_unmap_addr(rx_buf, mapping),
437cf2f1 4346 bp->rx_buf_size,
4347 PCI_DMA_FROMDEVICE);
4348
4349 dev_kfree_skb(skb);
4350 rx_buf->skb = NULL;
4351 }
4352}
4353
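/* Build the RX side of every queue: pre-allocate the TPA skb pool (when
 * TPA is enabled), chain the "next page" elements of the SGE, BD and CQE
 * rings, then fill the rings with buffers.  Allocation failures degrade
 * gracefully - TPA is disabled or the ring runs shorter - instead of
 * failing the load. */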
4354static void bnx2x_init_rx_rings(struct bnx2x *bp)
4355{
7a9b2557 4356 int func = BP_FUNC(bp);
4357 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4358 ETH_MAX_AGGREGATION_QUEUES_E1H;
4359 u16 ring_prod, cqe_ring_prod;
a2fbb9ea 4360 int i, j;
a2fbb9ea 4361
4362 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4363 DP(NETIF_MSG_IFUP,
4364 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
a2fbb9ea 4365
7a9b2557 4366 if (bp->flags & TPA_ENABLE_FLAG) {
7a9b2557 4367
555f6c78 4368 for_each_rx_queue(bp, j) {
32626230 4369 struct bnx2x_fastpath *fp = &bp->fp[j];
7a9b2557 4370
32626230 4371 for (i = 0; i < max_agg_queues; i++) {
4372 fp->tpa_pool[i].skb =
4373 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4374 if (!fp->tpa_pool[i].skb) {
4375 BNX2X_ERR("Failed to allocate TPA "
4376 "skb pool for queue[%d] - "
4377 "disabling TPA on this "
4378 "queue!\n", j);
4379 bnx2x_free_tpa_pool(bp, fp, i);
4380 fp->disable_tpa = 1;
4381 break;
4382 }
4383 pci_unmap_addr_set((struct sw_rx_bd *)
4384 &bp->fp->tpa_pool[i],
4385 mapping, 0);
4386 fp->tpa_state[i] = BNX2X_TPA_STOP;
4387 }
4388 }
4389 }
4390
555f6c78 4391 for_each_rx_queue(bp, j) {
4392 struct bnx2x_fastpath *fp = &bp->fp[j];
4393
4394 fp->rx_bd_cons = 0;
4395 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4396 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4397
4398 /* "next page" elements initialization */
4399 /* SGE ring */
4400 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4401 struct eth_rx_sge *sge;
4402
4403 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4404 sge->addr_hi =
4405 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4406 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4407 sge->addr_lo =
4408 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4409 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4410 }
4411
4412 bnx2x_init_sge_ring_bit_mask(fp);
a2fbb9ea 4413
7a9b2557 4414 /* RX BD ring */
4415 for (i = 1; i <= NUM_RX_RINGS; i++) {
4416 struct eth_rx_bd *rx_bd;
4417
4418 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4419 rx_bd->addr_hi =
4420 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
34f80b04 4421 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4422 rx_bd->addr_lo =
4423 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
34f80b04 4424 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4425 }
4426
34f80b04 4427 /* CQ ring */
4428 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4429 struct eth_rx_cqe_next_page *nextpg;
4430
4431 nextpg = (struct eth_rx_cqe_next_page *)
4432 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4433 nextpg->addr_hi =
4434 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
34f80b04 4435 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4436 nextpg->addr_lo =
4437 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
34f80b04 4438 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4439 }
4440
4441 /* Allocate SGEs and initialize the ring elements */
4442 for (i = 0, ring_prod = 0;
4443 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
a2fbb9ea 4444
4445 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4446 BNX2X_ERR("was only able to allocate "
4447 "%d rx sges\n", i);
4448 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4449 /* Cleanup already allocated elements */
4450 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
32626230 4451 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4452 fp->disable_tpa = 1;
4453 ring_prod = 0;
4454 break;
4455 }
4456 ring_prod = NEXT_SGE_IDX(ring_prod);
4457 }
4458 fp->rx_sge_prod = ring_prod;
4459
4460 /* Allocate BDs and initialize BD ring */
66e855f3 4461 fp->rx_comp_cons = 0;
7a9b2557 4462 cqe_ring_prod = ring_prod = 0;
4463 for (i = 0; i < bp->rx_ring_size; i++) {
4464 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4465 BNX2X_ERR("was only able to allocate "
4466 "%d rx skbs on queue[%d]\n", i, j);
4467 fp->eth_q_stats.rx_skb_alloc_failed++;
4468 break;
4469 }
4470 ring_prod = NEXT_RX_IDX(ring_prod);
7a9b2557 4471 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
53e5e96e 4472 WARN_ON(ring_prod <= i);
4473 }
4474
4475 fp->rx_bd_prod = ring_prod;
4476 /* must not have more available CQEs than BDs */
4477 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4478 cqe_ring_prod);
4479 fp->rx_pkt = fp->rx_calls = 0;
4480
4481 /* Warning!
4482 * this will generate an interrupt (to the TSTORM)
4483 * must only be done after the chip is initialized
4484 */
4485 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4486 fp->rx_sge_prod);
4487 if (j != 0)
4488 continue;
4489
4490 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4491 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4492 U64_LO(fp->rx_comp_mapping));
4493 REG_WR(bp, BAR_USTRORM_INTMEM +
7a9b2557 4494 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4495 U64_HI(fp->rx_comp_mapping));
4496 }
4497}
4498
4499static void bnx2x_init_tx_ring(struct bnx2x *bp)
4500{
4501 int i, j;
4502
555f6c78 4503 for_each_tx_queue(bp, j) {
4504 struct bnx2x_fastpath *fp = &bp->fp[j];
4505
4506 for (i = 1; i <= NUM_TX_RINGS; i++) {
4507 struct eth_tx_bd *tx_bd =
4508 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4509
4510 tx_bd->addr_hi =
4511 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
34f80b04 4512 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4513 tx_bd->addr_lo =
4514 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
34f80b04 4515 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4516 }
4517
4518 fp->tx_pkt_prod = 0;
4519 fp->tx_pkt_cons = 0;
4520 fp->tx_bd_prod = 0;
4521 fp->tx_bd_cons = 0;
4522 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4523 fp->tx_pkt = 0;
4524 }
4525}
4526
4527static void bnx2x_init_sp_ring(struct bnx2x *bp)
4528{
34f80b04 4529 int func = BP_FUNC(bp);
4530
4531 spin_lock_init(&bp->spq_lock);
4532
4533 bp->spq_left = MAX_SPQ_PENDING;
4534 bp->spq_prod_idx = 0;
4535 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4536 bp->spq_prod_bd = bp->spq;
4537 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4538
34f80b04 4539 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
a2fbb9ea 4540 U64_LO(bp->spq_mapping));
4541 REG_WR(bp,
4542 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4543 U64_HI(bp->spq_mapping));
4544
34f80b04 4545 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4546 bp->spq_prod_idx);
4547}
4548
4549static void bnx2x_init_context(struct bnx2x *bp)
4550{
4551 int i;
4552
4553 for_each_queue(bp, i) {
4554 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4555 struct bnx2x_fastpath *fp = &bp->fp[i];
de832a55 4556 u8 cl_id = fp->cl_id;
34f80b04 4557 u8 sb_id = FP_SB_ID(fp);
a2fbb9ea 4558
4559 context->ustorm_st_context.common.sb_index_numbers =
4560 BNX2X_RX_SB_INDEX_NUM;
4561 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4562 context->ustorm_st_context.common.status_block_id = sb_id;
4563 context->ustorm_st_context.common.flags =
4564 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4565 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4566 context->ustorm_st_context.common.statistics_counter_id =
4567 cl_id;
8d9c5f34 4568 context->ustorm_st_context.common.mc_alignment_log_size =
0f00846d 4569 BNX2X_RX_ALIGN_SHIFT;
34f80b04 4570 context->ustorm_st_context.common.bd_buff_size =
437cf2f1 4571 bp->rx_buf_size;
34f80b04 4572 context->ustorm_st_context.common.bd_page_base_hi =
a2fbb9ea 4573 U64_HI(fp->rx_desc_mapping);
34f80b04 4574 context->ustorm_st_context.common.bd_page_base_lo =
a2fbb9ea 4575 U64_LO(fp->rx_desc_mapping);
4576 if (!fp->disable_tpa) {
4577 context->ustorm_st_context.common.flags |=
4578 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4579 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4580 context->ustorm_st_context.common.sge_buff_size =
4581 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4582 (u32)0xffff);
4583 context->ustorm_st_context.common.sge_page_base_hi =
4584 U64_HI(fp->rx_sge_mapping);
4585 context->ustorm_st_context.common.sge_page_base_lo =
4586 U64_LO(fp->rx_sge_mapping);
4587 }
4588
4589 context->ustorm_ag_context.cdu_usage =
4590 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4591 CDU_REGION_NUMBER_UCM_AG,
4592 ETH_CONNECTION_TYPE);
4593
4594 context->xstorm_st_context.tx_bd_page_base_hi =
4595 U64_HI(fp->tx_desc_mapping);
4596 context->xstorm_st_context.tx_bd_page_base_lo =
4597 U64_LO(fp->tx_desc_mapping);
4598 context->xstorm_st_context.db_data_addr_hi =
4599 U64_HI(fp->tx_prods_mapping);
4600 context->xstorm_st_context.db_data_addr_lo =
4601 U64_LO(fp->tx_prods_mapping);
4602 context->xstorm_st_context.statistics_data = (fp->cl_id |
4603 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
a2fbb9ea 4604 context->cstorm_st_context.sb_index_number =
5c862848 4605 C_SB_ETH_TX_CQ_INDEX;
34f80b04 4606 context->cstorm_st_context.status_block_id = sb_id;
4607
4608 context->xstorm_ag_context.cdu_reserved =
4609 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4610 CDU_REGION_NUMBER_XCM_AG,
4611 ETH_CONNECTION_TYPE);
4612 }
4613}
4614
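/* Fill the TSTORM RSS indirection table, spreading entries round-robin
 * across the RX queues (BP_CL_ID(bp) + i % num_rx_queues).  Skipped
 * entirely when RSS is disabled. */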
4615static void bnx2x_init_ind_table(struct bnx2x *bp)
4616{
26c8fa4d 4617 int func = BP_FUNC(bp);
4618 int i;
4619
555f6c78 4620 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4621 return;
4622
4623 DP(NETIF_MSG_IFUP,
4624 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
a2fbb9ea 4625 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
34f80b04 4626 REG_WR8(bp, BAR_TSTRORM_INTMEM +
26c8fa4d 4627 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
555f6c78 4628 BP_CL_ID(bp) + (i % bp->num_rx_queues));
4629}
4630
4631static void bnx2x_set_client_config(struct bnx2x *bp)
4632{
49d66772 4633 struct tstorm_eth_client_config tstorm_client = {0};
4634 int port = BP_PORT(bp);
4635 int i;
49d66772 4636
e7799c5f 4637 tstorm_client.mtu = bp->dev->mtu;
49d66772 4638 tstorm_client.config_flags =
4639 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4640 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
49d66772 4641#ifdef BCM_VLAN
0c6671b0 4642 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
49d66772 4643 tstorm_client.config_flags |=
8d9c5f34 4644 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4645 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4646 }
4647#endif
49d66772 4648
4649 if (bp->flags & TPA_ENABLE_FLAG) {
4650 tstorm_client.max_sges_for_packet =
4f40f2cb 4651 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4652 tstorm_client.max_sges_for_packet =
4653 ((tstorm_client.max_sges_for_packet +
4654 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4655 PAGES_PER_SGE_SHIFT;
4656
4657 tstorm_client.config_flags |=
4658 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4659 }
4660
49d66772 4661 for_each_queue(bp, i) {
4662 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4663
49d66772 4664 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4665 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4666 ((u32 *)&tstorm_client)[0]);
4667 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4668 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4669 ((u32 *)&tstorm_client)[1]);
4670 }
4671
4672 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4673 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4674}
4675
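/* Translate the driver rx_mode (none/normal/allmulti/promisc) into the
 * per-function TSTORM drop-all/accept-all masks for unicast, multicast
 * and broadcast traffic, then push the client config unless RX is
 * completely off. */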
4676static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4677{
a2fbb9ea 4678 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4679 int mode = bp->rx_mode;
4680 int mask = (1 << BP_L_ID(bp));
4681 int func = BP_FUNC(bp);
4682 int i;
4683
3196a88a 4684 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4685
4686 switch (mode) {
4687 case BNX2X_RX_MODE_NONE: /* no Rx */
4688 tstorm_mac_filter.ucast_drop_all = mask;
4689 tstorm_mac_filter.mcast_drop_all = mask;
4690 tstorm_mac_filter.bcast_drop_all = mask;
4691 break;
4692 case BNX2X_RX_MODE_NORMAL:
34f80b04 4693 tstorm_mac_filter.bcast_accept_all = mask;
4694 break;
4695 case BNX2X_RX_MODE_ALLMULTI:
4696 tstorm_mac_filter.mcast_accept_all = mask;
4697 tstorm_mac_filter.bcast_accept_all = mask;
4698 break;
4699 case BNX2X_RX_MODE_PROMISC:
4700 tstorm_mac_filter.ucast_accept_all = mask;
4701 tstorm_mac_filter.mcast_accept_all = mask;
4702 tstorm_mac_filter.bcast_accept_all = mask;
4703 break;
4704 default:
4705 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4706 break;
4707 }
4708
4709 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4710 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4711 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4712 ((u32 *)&tstorm_mac_filter)[i]);
4713
34f80b04 4714/* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4715 ((u32 *)&tstorm_mac_filter)[i]); */
4716 }
a2fbb9ea 4717
4718 if (mode != BNX2X_RX_MODE_NONE)
4719 bnx2x_set_client_config(bp);
4720}
4721
4722static void bnx2x_init_internal_common(struct bnx2x *bp)
4723{
4724 int i;
4725
4726 if (bp->flags & TPA_ENABLE_FLAG) {
4727 struct tstorm_eth_tpa_exist tpa = {0};
4728
4729 tpa.tpa_exist = 1;
4730
4731 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4732 ((u32 *)&tpa)[0]);
4733 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4734 ((u32 *)&tpa)[1]);
4735 }
4736
4737 /* Zero this manually as its initialization is
4738 currently missing in the initTool */
4739 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4740 REG_WR(bp, BAR_USTRORM_INTMEM +
4741 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4742}
4743
4744static void bnx2x_init_internal_port(struct bnx2x *bp)
4745{
4746 int port = BP_PORT(bp);
4747
4748 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4749 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4750 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4751 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4752}
4753
4754/* Calculates the sum of vn_min_rates.
4755 It's needed for further normalizing of the min_rates.
4756 Returns:
4757 sum of vn_min_rates.
4758 or
4759 0 - if all the min_rates are 0.
4760 In the latter case the fairness algorithm should be deactivated.
4761 If not all min_rates are zero then those that are zeroes will be set to 1.
4762 */
4763static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4764{
4765 int all_zero = 1;
4766 int port = BP_PORT(bp);
4767 int vn;
4768
4769 bp->vn_weight_sum = 0;
4770 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4771 int func = 2*vn + port;
4772 u32 vn_cfg =
4773 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4774 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4775 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4776
4777 /* Skip hidden vns */
4778 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4779 continue;
4780
4781 /* If min rate is zero - set it to 1 */
4782 if (!vn_min_rate)
4783 vn_min_rate = DEF_MIN_RATE;
4784 else
4785 all_zero = 0;
4786
4787 bp->vn_weight_sum += vn_min_rate;
4788 }
4789
4790 /* ... only if all min rates are zeros - disable fairness */
4791 if (all_zero)
4792 bp->vn_weight_sum = 0;
4793}
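/* Worked example (illustrative only, not driver code): on a port with
 * three visible VNs whose configured min-BW fields are 0, 25 and 75, the
 * *100 scaling gives rates 0, 2500 and 7500; the zero entry is raised to
 * DEF_MIN_RATE, so vn_weight_sum becomes DEF_MIN_RATE + 10000 and
 * fairness stays enabled.  Only if every VN were configured to 0 would
 * vn_weight_sum be forced back to 0 and fairness disabled. */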
4794
471de716 4795static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4796{
4797 struct tstorm_eth_function_common_config tstorm_config = {0};
4798 struct stats_indication_flags stats_flags = {0};
4799 int port = BP_PORT(bp);
4800 int func = BP_FUNC(bp);
4801 int i, j;
4802 u32 offset;
471de716 4803 u16 max_agg_size;
4804
4805 if (is_multi(bp)) {
555f6c78 4806 tstorm_config.config_flags = MULTI_FLAGS(bp);
4807 tstorm_config.rss_result_mask = MULTI_MASK;
4808 }
4809 if (IS_E1HMF(bp))
4810 tstorm_config.config_flags |=
4811 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4812
4813 tstorm_config.leading_client_id = BP_L_ID(bp);
4814
a2fbb9ea 4815 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4816 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4817 (*(u32 *)&tstorm_config));
4818
c14423fe 4819 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4820 bnx2x_set_storm_rx_mode(bp);
4821
4822 for_each_queue(bp, i) {
4823 u8 cl_id = bp->fp[i].cl_id;
4824
4825 /* reset xstorm per client statistics */
4826 offset = BAR_XSTRORM_INTMEM +
4827 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4828 for (j = 0;
4829 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4830 REG_WR(bp, offset + j*4, 0);
4831
4832 /* reset tstorm per client statistics */
4833 offset = BAR_TSTRORM_INTMEM +
4834 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4835 for (j = 0;
4836 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4837 REG_WR(bp, offset + j*4, 0);
4838
4839 /* reset ustorm per client statistics */
4840 offset = BAR_USTRORM_INTMEM +
4841 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4842 for (j = 0;
4843 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4844 REG_WR(bp, offset + j*4, 0);
4845 }
4846
4847 /* Init statistics related context */
34f80b04 4848 stats_flags.collect_eth = 1;
a2fbb9ea 4849
66e855f3 4850 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4851 ((u32 *)&stats_flags)[0]);
66e855f3 4852 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4853 ((u32 *)&stats_flags)[1]);
4854
66e855f3 4855 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4856 ((u32 *)&stats_flags)[0]);
66e855f3 4857 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4858 ((u32 *)&stats_flags)[1]);
4859
4860 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4861 ((u32 *)&stats_flags)[0]);
4862 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4863 ((u32 *)&stats_flags)[1]);
4864
66e855f3 4865 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4866 ((u32 *)&stats_flags)[0]);
66e855f3 4867 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4868 ((u32 *)&stats_flags)[1]);
4869
4870 REG_WR(bp, BAR_XSTRORM_INTMEM +
4871 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4872 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4873 REG_WR(bp, BAR_XSTRORM_INTMEM +
4874 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4875 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4876
4877 REG_WR(bp, BAR_TSTRORM_INTMEM +
4878 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4879 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4880 REG_WR(bp, BAR_TSTRORM_INTMEM +
4881 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4882 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4883
4884 REG_WR(bp, BAR_USTRORM_INTMEM +
4885 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4886 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4887 REG_WR(bp, BAR_USTRORM_INTMEM +
4888 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4889 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4890
4891 if (CHIP_IS_E1H(bp)) {
4892 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4893 IS_E1HMF(bp));
4894 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4895 IS_E1HMF(bp));
4896 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4897 IS_E1HMF(bp));
4898 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4899 IS_E1HMF(bp));
4900
4901 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4902 bp->e1hov);
4903 }
4904
4905 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4906 max_agg_size =
4907 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4908 SGE_PAGE_SIZE * PAGES_PER_SGE),
4909 (u32)0xffff);
555f6c78 4910 for_each_rx_queue(bp, i) {
7a9b2557 4911 struct bnx2x_fastpath *fp = &bp->fp[i];
4912
4913 REG_WR(bp, BAR_USTRORM_INTMEM +
4914 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4915 U64_LO(fp->rx_comp_mapping));
4916 REG_WR(bp, BAR_USTRORM_INTMEM +
4917 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4918 U64_HI(fp->rx_comp_mapping));
4919
4920 REG_WR16(bp, BAR_USTRORM_INTMEM +
4921 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4922 max_agg_size);
4923 }
8a1c38d1 4924
4925 /* dropless flow control */
4926 if (CHIP_IS_E1H(bp)) {
4927 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
4928
4929 rx_pause.bd_thr_low = 250;
4930 rx_pause.cqe_thr_low = 250;
4931 rx_pause.cos = 1;
4932 rx_pause.sge_thr_low = 0;
4933 rx_pause.bd_thr_high = 350;
4934 rx_pause.cqe_thr_high = 350;
4935 rx_pause.sge_thr_high = 0;
4936
4937 for_each_rx_queue(bp, i) {
4938 struct bnx2x_fastpath *fp = &bp->fp[i];
4939
4940 if (!fp->disable_tpa) {
4941 rx_pause.sge_thr_low = 150;
4942 rx_pause.sge_thr_high = 250;
4943 }
4944
4945
4946 offset = BAR_USTRORM_INTMEM +
4947 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
4948 fp->cl_id);
4949 for (j = 0;
4950 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
4951 j++)
4952 REG_WR(bp, offset + j*4,
4953 ((u32 *)&rx_pause)[j]);
4954 }
4955 }
4956
4957 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
4958
4959 /* Init rate shaping and fairness contexts */
4960 if (IS_E1HMF(bp)) {
4961 int vn;
4962
 4963 /* During init there is no active link;
 4964 until link is up, set the link rate to 10Gbps */
4965 bp->link_vars.line_speed = SPEED_10000;
4966 bnx2x_init_port_minmax(bp);
4967
4968 bnx2x_calc_vn_weight_sum(bp);
4969
4970 for (vn = VN_0; vn < E1HVN_MAX; vn++)
4971 bnx2x_init_vn_minmax(bp, 2*vn + port);
4972
4973 /* Enable rate shaping and fairness */
4974 bp->cmng.flags.cmng_enables =
4975 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
4976 if (bp->vn_weight_sum)
4977 bp->cmng.flags.cmng_enables |=
4978 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
4979 else
 4980 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
 4981 " fairness will be disabled\n");
4982 } else {
4983 /* rate shaping and fairness are disabled */
4984 DP(NETIF_MSG_IFUP,
 4985 "single function mode, minmax will be disabled\n");
4986 }
4987
4988
4989 /* Store it to internal memory */
4990 if (bp->port.pmf)
4991 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
4992 REG_WR(bp, BAR_XSTRORM_INTMEM +
4993 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
4994 ((u32 *)(&bp->cmng))[i]);
4995}
4996
4997static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4998{
4999 switch (load_code) {
5000 case FW_MSG_CODE_DRV_LOAD_COMMON:
5001 bnx2x_init_internal_common(bp);
5002 /* no break */
5003
5004 case FW_MSG_CODE_DRV_LOAD_PORT:
5005 bnx2x_init_internal_port(bp);
5006 /* no break */
5007
5008 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5009 bnx2x_init_internal_func(bp);
5010 break;
5011
5012 default:
5013 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5014 break;
5015 }
5016}
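
/* Illustrative sketch (not part of the driver): the switch above relies on
 * deliberate fall-through, so each load type runs its own init stage plus
 * every narrower stage below it -- COMMON runs all three, PORT runs port +
 * function, FUNCTION runs only the last. The same cascade written with
 * explicit level checks (hypothetical names, compiled out):
 */
#if 0
enum bnx2x_init_level { LEVEL_FUNCTION, LEVEL_PORT, LEVEL_COMMON };

static void bnx2x_init_internal_cascade(struct bnx2x *bp,
					enum bnx2x_init_level level)
{
	if (level >= LEVEL_COMMON)
		bnx2x_init_internal_common(bp);
	if (level >= LEVEL_PORT)
		bnx2x_init_internal_port(bp);
	bnx2x_init_internal_func(bp);		/* always runs */
}
#endif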
5017
5018static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5019{
5020 int i;
5021
5022 for_each_queue(bp, i) {
5023 struct bnx2x_fastpath *fp = &bp->fp[i];
5024
34f80b04 5025 fp->bp = bp;
a2fbb9ea 5026 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5027 fp->index = i;
5028 fp->cl_id = BP_L_ID(bp) + i;
5029 fp->sb_id = fp->cl_id;
5030 DP(NETIF_MSG_IFUP,
5031 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5032 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5033 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5034 FP_SB_ID(fp));
5035 bnx2x_update_fpsb_idx(fp);
5036 }
5037
5038 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5039 DEF_SB_ID);
5040 bnx2x_update_dsb_idx(bp);
5041 bnx2x_update_coalesce(bp);
5042 bnx2x_init_rx_rings(bp);
5043 bnx2x_init_tx_ring(bp);
5044 bnx2x_init_sp_ring(bp);
5045 bnx2x_init_context(bp);
471de716 5046 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5047 bnx2x_init_ind_table(bp);
5048 bnx2x_stats_init(bp);
5049
5050 /* At this point, we are ready for interrupts */
5051 atomic_set(&bp->intr_sem, 0);
5052
5053 /* flush all before enabling interrupts */
5054 mb();
5055 mmiowb();
5056
615f8fd9 5057 bnx2x_int_enable(bp);
5058}
5059
5060/* end of nic init */
5061
5062/*
5063 * gzip service functions
5064 */
5065
5066static int bnx2x_gunzip_init(struct bnx2x *bp)
5067{
5068 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5069 &bp->gunzip_mapping);
5070 if (bp->gunzip_buf == NULL)
5071 goto gunzip_nomem1;
5072
5073 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5074 if (bp->strm == NULL)
5075 goto gunzip_nomem2;
5076
5077 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5078 GFP_KERNEL);
5079 if (bp->strm->workspace == NULL)
5080 goto gunzip_nomem3;
5081
5082 return 0;
5083
5084gunzip_nomem3:
5085 kfree(bp->strm);
5086 bp->strm = NULL;
5087
5088gunzip_nomem2:
5089 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5090 bp->gunzip_mapping);
5091 bp->gunzip_buf = NULL;
5092
5093gunzip_nomem1:
5094 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5095 " decompression\n", bp->dev->name);
5096 return -ENOMEM;
5097}
5098
5099static void bnx2x_gunzip_end(struct bnx2x *bp)
5100{
5101 kfree(bp->strm->workspace);
5102
5103 kfree(bp->strm);
5104 bp->strm = NULL;
5105
5106 if (bp->gunzip_buf) {
5107 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5108 bp->gunzip_mapping);
5109 bp->gunzip_buf = NULL;
5110 }
5111}
5112
5113static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5114{
5115 int n, rc;
5116
5117 /* check gzip header */
5118 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5119 return -EINVAL;
5120
5121 n = 10;
5122
34f80b04 5123#define FNAME 0x8
5124
5125 if (zbuf[3] & FNAME)
5126 while ((zbuf[n++] != 0) && (n < len));
5127
5128 bp->strm->next_in = zbuf + n;
5129 bp->strm->avail_in = len - n;
5130 bp->strm->next_out = bp->gunzip_buf;
5131 bp->strm->avail_out = FW_BUF_SIZE;
5132
5133 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5134 if (rc != Z_OK)
5135 return rc;
5136
5137 rc = zlib_inflate(bp->strm, Z_FINISH);
5138 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5139 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5140 bp->dev->name, bp->strm->msg);
5141
5142 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5143 if (bp->gunzip_outlen & 0x3)
5144 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5145 " gunzip_outlen (%d) not aligned\n",
5146 bp->dev->name, bp->gunzip_outlen);
5147 bp->gunzip_outlen >>= 2;
5148
5149 zlib_inflateEnd(bp->strm);
5150
5151 if (rc == Z_STREAM_END)
5152 return 0;
5153
5154 return rc;
5155}
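
/* Illustrative sketch (not part of the driver): the header handling above
 * follows RFC 1952 -- a 10-byte fixed header (magic 0x1f 0x8b, method 8 =
 * deflate, flags, mtime, xfl, os), optionally followed by a NUL-terminated
 * original file name when the FNAME flag (bit 3) is set; the code assumes
 * the firmware image never sets FEXTRA/FHCRC/FCOMMENT. zlib_inflateInit2()
 * with a negative window size (-MAX_WBITS) then consumes the raw deflate
 * stream that follows. A standalone parser of the same layout
 * (hypothetical helper, compiled out):
 */
#if 0
static int gzip_payload_offset(const u8 *buf, int len)
{
	int n = 10;				/* fixed part of the header */

	if (len < n || buf[0] != 0x1f || buf[1] != 0x8b || buf[2] != 8)
		return -1;			/* not deflate-in-gzip */
	if (buf[3] & 0x08)			/* FNAME: skip name + NUL */
		while (n < len && buf[n++] != 0)
			;
	return n;				/* deflate data starts here */
}
#endif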
5156
5157/* nic load/unload */
5158
5159/*
34f80b04 5160 * General service functions
5161 */
5162
5163/* send a NIG loopback debug packet */
5164static void bnx2x_lb_pckt(struct bnx2x *bp)
5165{
a2fbb9ea 5166 u32 wb_write[3];
5167
5168 /* Ethernet source and destination addresses */
5169 wb_write[0] = 0x55555555;
5170 wb_write[1] = 0x55555555;
34f80b04 5171 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5172 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5173
5174 /* NON-IP protocol */
5175 wb_write[0] = 0x09000000;
5176 wb_write[1] = 0x55555555;
34f80b04 5177 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5178 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5179}
5180
 5181/* some of the internal memories
 5182 * are not directly readable from the driver;
 5183 * to test them we send debug packets
 5184 */
5185static int bnx2x_int_mem_test(struct bnx2x *bp)
5186{
5187 int factor;
5188 int count, i;
5189 u32 val = 0;
5190
ad8d3948 5191 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5192 factor = 120;
5193 else if (CHIP_REV_IS_EMUL(bp))
5194 factor = 200;
5195 else
a2fbb9ea 5196 factor = 1;
5197
5198 DP(NETIF_MSG_HW, "start part1\n");
5199
5200 /* Disable inputs of parser neighbor blocks */
5201 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5202 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5203 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5204 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5205
5206 /* Write 0 to parser credits for CFC search request */
5207 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5208
5209 /* send Ethernet packet */
5210 bnx2x_lb_pckt(bp);
5211
 5212 /* TODO: do we need to reset the NIG statistics? */
5213 /* Wait until NIG register shows 1 packet of size 0x10 */
5214 count = 1000 * factor;
5215 while (count) {
34f80b04 5216
5217 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5218 val = *bnx2x_sp(bp, wb_data[0]);
5219 if (val == 0x10)
5220 break;
5221
5222 msleep(10);
5223 count--;
5224 }
5225 if (val != 0x10) {
5226 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5227 return -1;
5228 }
5229
5230 /* Wait until PRS register shows 1 packet */
5231 count = 1000 * factor;
5232 while (count) {
5233 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5234 if (val == 1)
5235 break;
5236
5237 msleep(10);
5238 count--;
5239 }
5240 if (val != 0x1) {
5241 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5242 return -2;
5243 }
5244
5245 /* Reset and init BRB, PRS */
34f80b04 5246 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5247 msleep(50);
34f80b04 5248 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5249 msleep(50);
5250 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5251 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5252
5253 DP(NETIF_MSG_HW, "part2\n");
5254
5255 /* Disable inputs of parser neighbor blocks */
5256 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5257 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5258 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5259 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5260
5261 /* Write 0 to parser credits for CFC search request */
5262 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5263
5264 /* send 10 Ethernet packets */
5265 for (i = 0; i < 10; i++)
5266 bnx2x_lb_pckt(bp);
5267
5268 /* Wait until NIG register shows 10 + 1
5269 packets of size 11*0x10 = 0xb0 */
5270 count = 1000 * factor;
5271 while (count) {
34f80b04 5272
5273 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5274 val = *bnx2x_sp(bp, wb_data[0]);
5275 if (val == 0xb0)
5276 break;
5277
5278 msleep(10);
5279 count--;
5280 }
5281 if (val != 0xb0) {
5282 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5283 return -3;
5284 }
5285
5286 /* Wait until PRS register shows 2 packets */
5287 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5288 if (val != 2)
5289 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5290
5291 /* Write 1 to parser credits for CFC search request */
5292 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5293
5294 /* Wait until PRS register shows 3 packets */
5295 msleep(10 * factor);
5296 /* Wait until NIG register shows 1 packet of size 0x10 */
5297 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5298 if (val != 3)
5299 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5300
5301 /* clear NIG EOP FIFO */
5302 for (i = 0; i < 11; i++)
5303 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5304 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5305 if (val != 1) {
5306 BNX2X_ERR("clear of NIG failed\n");
5307 return -4;
5308 }
5309
5310 /* Reset and init BRB, PRS, NIG */
5311 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5312 msleep(50);
5313 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5314 msleep(50);
5315 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5316 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5317#ifndef BCM_ISCSI
5318 /* set NIC mode */
5319 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5320#endif
5321
5322 /* Enable inputs of parser neighbor blocks */
5323 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5324 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5325 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5326 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5327
5328 DP(NETIF_MSG_HW, "done\n");
5329
5330 return 0; /* OK */
5331}
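
/* Illustrative sketch (not part of the driver): the test above repeats one
 * idiom -- poll a counter every 10 ms until it reaches an expected value,
 * with the iteration budget scaled by `factor` since FPGA/emulation
 * platforms run far slower than silicon. Factored out (hypothetical
 * helper, compiled out):
 */
#if 0
static int bnx2x_poll_val(struct bnx2x *bp, u32 expected, int factor,
			  u32 (*read_val)(struct bnx2x *bp))
{
	int count = 1000 * factor;
	u32 val = 0;

	while (count--) {
		val = read_val(bp);
		if (val == expected)
			return 0;
		msleep(10);
	}
	BNX2X_ERR("poll timeout, last val = 0x%x\n", val);
	return -EBUSY;
}
#endif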
5332
5333static void enable_blocks_attention(struct bnx2x *bp)
5334{
5335 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5336 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5337 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5338 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5339 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5340 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5341 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5342 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5343 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5344/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5345/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5346 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5347 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5348 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5349/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5350/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5351 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5352 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5353 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5354 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5355/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5356/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5357 if (CHIP_REV_IS_FPGA(bp))
5358 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5359 else
5360 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5361 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5362 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5363 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5364/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5365/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5366 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5367 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5368/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5369 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5370}
5371
34f80b04 5372
5373static void bnx2x_reset_common(struct bnx2x *bp)
5374{
5375 /* reset_common */
5376 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5377 0xd3ffff7f);
5378 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5379}
5380
34f80b04 5381static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5382{
a2fbb9ea 5383 u32 val, i;
a2fbb9ea 5384
34f80b04 5385 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5386
81f75bbf 5387 bnx2x_reset_common(bp);
5388 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5389 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5390
5391 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5392 if (CHIP_IS_E1H(bp))
5393 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5394
5395 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5396 msleep(30);
5397 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5398
5399 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5400 if (CHIP_IS_E1(bp)) {
5401 /* enable HW interrupt from PXP on USDM overflow
5402 bit 16 on INT_MASK_0 */
5403 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5404 }
a2fbb9ea 5405
5406 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5407 bnx2x_init_pxp(bp);
5408
5409#ifdef __BIG_ENDIAN
5410 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5411 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5412 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5413 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5414 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5415 /* make sure this value is 0 */
5416 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5417
5418/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5419 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5420 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5421 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5422 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5423#endif
5424
34f80b04 5425 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5426#ifdef BCM_ISCSI
5427 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5428 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5429 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5430#endif
5431
5432 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5433 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5434
 5435 /* let the HW do its magic ... */
5436 msleep(100);
5437 /* finish PXP init */
5438 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5439 if (val != 1) {
5440 BNX2X_ERR("PXP2 CFG failed\n");
5441 return -EBUSY;
5442 }
5443 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5444 if (val != 1) {
5445 BNX2X_ERR("PXP2 RD_INIT failed\n");
5446 return -EBUSY;
5447 }
a2fbb9ea 5448
5449 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5450 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5451
34f80b04 5452 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5453
5454 /* clean the DMAE memory */
5455 bp->dmae_ready = 1;
5456 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5457
5458 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5459 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5460 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5461 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5462
5463 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5464 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5465 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5466 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5467
5468 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5469 /* soft reset pulse */
5470 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5471 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5472
5473#ifdef BCM_ISCSI
34f80b04 5474 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5475#endif
a2fbb9ea 5476
5477 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5478 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5479 if (!CHIP_REV_IS_SLOW(bp)) {
5480 /* enable hw interrupt from doorbell Q */
5481 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5482 }
a2fbb9ea 5483
34f80b04 5484 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5485 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5486 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5487 /* set NIC mode */
5488 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5489 if (CHIP_IS_E1H(bp))
5490 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5491
5492 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5493 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5494 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5495 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5496
5497 if (CHIP_IS_E1H(bp)) {
5498 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5499 STORM_INTMEM_SIZE_E1H/2);
5500 bnx2x_init_fill(bp,
5501 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5502 0, STORM_INTMEM_SIZE_E1H/2);
5503 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5504 STORM_INTMEM_SIZE_E1H/2);
5505 bnx2x_init_fill(bp,
5506 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5507 0, STORM_INTMEM_SIZE_E1H/2);
5508 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5509 STORM_INTMEM_SIZE_E1H/2);
5510 bnx2x_init_fill(bp,
5511 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5512 0, STORM_INTMEM_SIZE_E1H/2);
5513 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5514 STORM_INTMEM_SIZE_E1H/2);
5515 bnx2x_init_fill(bp,
5516 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5517 0, STORM_INTMEM_SIZE_E1H/2);
5518 } else { /* E1 */
5519 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5520 STORM_INTMEM_SIZE_E1);
5521 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5522 STORM_INTMEM_SIZE_E1);
5523 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5524 STORM_INTMEM_SIZE_E1);
5525 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5526 STORM_INTMEM_SIZE_E1);
34f80b04 5527 }
a2fbb9ea 5528
5529 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5530 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5531 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5532 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5533
5534 /* sync semi rtc */
5535 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5536 0x80000000);
5537 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5538 0x80000000);
a2fbb9ea 5539
5540 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5541 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5542 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5543
5544 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5545 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5546 REG_WR(bp, i, 0xc0cac01a);
5547 /* TODO: replace with something meaningful */
5548 }
8d9c5f34 5549 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5550 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5551
5552 if (sizeof(union cdu_context) != 1024)
5553 /* we currently assume that a context is 1024 bytes */
5554 printk(KERN_ALERT PFX "please adjust the size of"
5555 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5556
5557 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5558 val = (4 << 24) + (0 << 12) + 1024;
5559 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5560 if (CHIP_IS_E1(bp)) {
5561 /* !!! fix pxp client crdit until excel update */
5562 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5563 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5564 }
a2fbb9ea 5565
5566 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5567 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5568 /* enable context validation interrupt from CFC */
5569 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5570
5571 /* set the thresholds to prevent CFC/CDU race */
5572 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5573
5574 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5575 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5576
5577 /* PXPCS COMMON comes here */
5578 /* Reset PCIE errors for debug */
5579 REG_WR(bp, 0x2814, 0xffffffff);
5580 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5581
5582 /* EMAC0 COMMON comes here */
5583 /* EMAC1 COMMON comes here */
5584 /* DBU COMMON comes here */
5585 /* DBG COMMON comes here */
5586
5587 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5588 if (CHIP_IS_E1H(bp)) {
5589 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5590 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5591 }
5592
5593 if (CHIP_REV_IS_SLOW(bp))
5594 msleep(200);
5595
5596 /* finish CFC init */
5597 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5598 if (val != 1) {
5599 BNX2X_ERR("CFC LL_INIT failed\n");
5600 return -EBUSY;
5601 }
5602 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5603 if (val != 1) {
5604 BNX2X_ERR("CFC AC_INIT failed\n");
5605 return -EBUSY;
5606 }
5607 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5608 if (val != 1) {
5609 BNX2X_ERR("CFC CAM_INIT failed\n");
5610 return -EBUSY;
5611 }
5612 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5613
 5614 /* read the NIG statistic
 5615 to see if this is our first bring-up since power-up */
5616 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5617 val = *bnx2x_sp(bp, wb_data[0]);
5618
5619 /* do internal memory self test */
5620 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5621 BNX2X_ERR("internal mem self test failed\n");
5622 return -EBUSY;
5623 }
5624
5625 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5626 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5627 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5628 /* Fan failure is indicated by SPIO 5 */
5629 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5630 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5631
5632 /* set to active low mode */
5633 val = REG_RD(bp, MISC_REG_SPIO_INT);
5634 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5635 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5636 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5637
5638 /* enable interrupt to signal the IGU */
5639 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5640 val |= (1 << MISC_REGISTERS_SPIO_5);
5641 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5642 break;
f1410647 5643
5644 default:
5645 break;
5646 }
f1410647 5647
5648 /* clear PXP2 attentions */
5649 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5650
34f80b04 5651 enable_blocks_attention(bp);
a2fbb9ea 5652
5653 if (!BP_NOMCP(bp)) {
5654 bnx2x_acquire_phy_lock(bp);
5655 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5656 bnx2x_release_phy_lock(bp);
5657 } else
5658 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5659
5660 return 0;
5661}
a2fbb9ea 5662
5663static int bnx2x_init_port(struct bnx2x *bp)
5664{
5665 int port = BP_PORT(bp);
1c06328c 5666 u32 low, high;
34f80b04 5667 u32 val;
a2fbb9ea 5668
5669 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5670
5671 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5672
5673 /* Port PXP comes here */
5674 /* Port PXP2 comes here */
5675#ifdef BCM_ISCSI
5676 /* Port0 1
5677 * Port1 385 */
5678 i++;
5679 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5680 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5681 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5682 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5683
5684 /* Port0 2
5685 * Port1 386 */
5686 i++;
5687 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5688 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5689 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5690 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5691
5692 /* Port0 3
5693 * Port1 387 */
5694 i++;
5695 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5696 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5697 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5698 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5699#endif
34f80b04 5700 /* Port CMs come here */
5701 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5702 (port ? XCM_PORT1_END : XCM_PORT0_END));
5703
5704 /* Port QM comes here */
5705#ifdef BCM_ISCSI
5706 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5707 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5708
5709 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5710 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5711#endif
5712 /* Port DQ comes here */
5713
5714 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5715 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5716 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5717 /* no pause for emulation and FPGA */
5718 low = 0;
5719 high = 513;
5720 } else {
5721 if (IS_E1HMF(bp))
5722 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5723 else if (bp->dev->mtu > 4096) {
5724 if (bp->flags & ONE_PORT_FLAG)
5725 low = 160;
5726 else {
5727 val = bp->dev->mtu;
5728 /* (24*1024 + val*4)/256 */
5729 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5730 }
5731 } else
5732 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5733 high = low + 56; /* 14*1024/256 */
5734 }
5735 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5736 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
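	/* Worked example (illustrative note): with mtu = 9000 on a two-port
	 * board in SF mode, low = 96 + 9000/64 + 1 = 237 blocks, matching
	 * the quoted (24*1024 + mtu*4)/256 formula, since
	 * (24576 + 36000)/256 = 236.6 rounds up to 237; high is then
	 * 237 + 56 = 293, the extra 56 blocks being 14KB at 256 bytes per
	 * BRB block.
	 */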
5737
5738
ad8d3948 5739 /* Port PRS comes here */
5740 /* Port TSDM comes here */
5741 /* Port CSDM comes here */
5742 /* Port USDM comes here */
5743 /* Port XSDM comes here */
5744 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5745 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5746 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5747 port ? USEM_PORT1_END : USEM_PORT0_END);
5748 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5749 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5750 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5751 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5752 /* Port UPB comes here */
5753 /* Port XPB comes here */
5754
5755 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5756 port ? PBF_PORT1_END : PBF_PORT0_END);
5757
5758 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5759 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5760
5761 /* update threshold */
34f80b04 5762 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5763 /* update init credit */
34f80b04 5764 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5765
5766 /* probe changes */
34f80b04 5767 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5768 msleep(5);
34f80b04 5769 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5770
5771#ifdef BCM_ISCSI
5772 /* tell the searcher where the T2 table is */
5773 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5774
5775 wb_write[0] = U64_LO(bp->t2_mapping);
5776 wb_write[1] = U64_HI(bp->t2_mapping);
5777 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5778 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5779 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5780 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5781
5782 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5783 /* Port SRCH comes here */
5784#endif
5785 /* Port CDU comes here */
5786 /* Port CFC comes here */
5787
5788 if (CHIP_IS_E1(bp)) {
5789 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5790 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5791 }
5792 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5793 port ? HC_PORT1_END : HC_PORT0_END);
5794
5795 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5796 MISC_AEU_PORT0_START,
5797 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5798 /* init aeu_mask_attn_func_0/1:
5799 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5800 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5801 * bits 4-7 are used for "per vn group attention" */
5802 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5803 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5804
5805 /* Port PXPCS comes here */
5806 /* Port EMAC0 comes here */
5807 /* Port EMAC1 comes here */
5808 /* Port DBU comes here */
5809 /* Port DBG comes here */
5810 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5811 port ? NIG_PORT1_END : NIG_PORT0_END);
5812
5813 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5814
5815 if (CHIP_IS_E1H(bp)) {
5816 /* 0x2 disable e1hov, 0x1 enable */
5817 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5818 (IS_E1HMF(bp) ? 0x1 : 0x2));
5819
5820 /* support pause requests from USDM, TSDM and BRB */
5821 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5822
5823 {
5824 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5825 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5826 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5827 }
5828 }
5829
5830 /* Port MCP comes here */
5831 /* Port DMAE comes here */
5832
34f80b04 5833 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5834 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
5835 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5836 /* add SPIO 5 to group 0 */
5837 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5838 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5839 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5840 break;
5841
5842 default:
5843 break;
5844 }
5845
c18487ee 5846 bnx2x__link_reset(bp);
a2fbb9ea 5847
5848 return 0;
5849}
5850
5851#define ILT_PER_FUNC (768/2)
5852#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
 5853/* the phys address is shifted right 12 bits and a
 5854 1=valid bit is added at the 53rd bit;
 5855 then, since this is a wide register(TM),
 5856 we split it into two 32-bit writes
 5857 */
5858#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5859#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5860#define PXP_ONE_ILT(x) (((x) << 10) | x)
5861#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5862
5863#define CNIC_ILT_LINES 0
5864
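/* Worked example (illustrative note): for a DMA address 0x0000001234567000,
 * ONCHIP_ADDR1() drops the 12 low (page-offset) bits and keeps the low
 * 32 bits of the result, 0x01234567, while ONCHIP_ADDR2() carries the
 * remaining high bits (addr >> 44 = 0 here) plus the valid bit at
 * position 20 -- bit 52 of the combined value, i.e. the 53rd bit counting
 * from 1, as the comment above says. PXP_ONE_ILT(x) packs one index into
 * both halves of a {last:10|first} range, so PXP_ONE_ILT(3) = 0xc03.
 */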
5865static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5866{
5867 int reg;
5868
5869 if (CHIP_IS_E1H(bp))
5870 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5871 else /* E1 */
5872 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5873
5874 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5875}
5876
5877static int bnx2x_init_func(struct bnx2x *bp)
5878{
5879 int port = BP_PORT(bp);
5880 int func = BP_FUNC(bp);
8badd27a 5881 u32 addr, val;
5882 int i;
5883
5884 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5885
5886 /* set MSI reconfigure capability */
5887 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5888 val = REG_RD(bp, addr);
5889 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5890 REG_WR(bp, addr, val);
5891
5892 i = FUNC_ILT_BASE(func);
5893
5894 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5895 if (CHIP_IS_E1H(bp)) {
5896 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5897 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5898 } else /* E1 */
5899 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5900 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5901
5902
5903 if (CHIP_IS_E1H(bp)) {
5904 for (i = 0; i < 9; i++)
5905 bnx2x_init_block(bp,
5906 cm_start[func][i], cm_end[func][i]);
5907
5908 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5909 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5910 }
5911
5912 /* HC init per function */
5913 if (CHIP_IS_E1H(bp)) {
5914 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5915
5916 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5917 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5918 }
5919 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5920
c14423fe 5921 /* Reset PCIE errors for debug */
5922 REG_WR(bp, 0x2114, 0xffffffff);
5923 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5924
5925 return 0;
5926}
5927
5928static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5929{
5930 int i, rc = 0;
a2fbb9ea 5931
5932 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5933 BP_FUNC(bp), load_code);
a2fbb9ea 5934
5935 bp->dmae_ready = 0;
5936 mutex_init(&bp->dmae_mutex);
5937 bnx2x_gunzip_init(bp);
a2fbb9ea 5938
5939 switch (load_code) {
5940 case FW_MSG_CODE_DRV_LOAD_COMMON:
5941 rc = bnx2x_init_common(bp);
5942 if (rc)
5943 goto init_hw_err;
5944 /* no break */
5945
5946 case FW_MSG_CODE_DRV_LOAD_PORT:
5947 bp->dmae_ready = 1;
5948 rc = bnx2x_init_port(bp);
5949 if (rc)
5950 goto init_hw_err;
5951 /* no break */
5952
5953 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5954 bp->dmae_ready = 1;
5955 rc = bnx2x_init_func(bp);
5956 if (rc)
5957 goto init_hw_err;
5958 break;
5959
5960 default:
5961 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5962 break;
5963 }
5964
5965 if (!BP_NOMCP(bp)) {
5966 int func = BP_FUNC(bp);
5967
5968 bp->fw_drv_pulse_wr_seq =
34f80b04 5969 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5970 DRV_PULSE_SEQ_MASK);
5971 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5972 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5973 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5974 } else
5975 bp->func_stx = 0;
a2fbb9ea 5976
5977 /* this needs to be done before gunzip end */
5978 bnx2x_zero_def_sb(bp);
5979 for_each_queue(bp, i)
5980 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5981
5982init_hw_err:
5983 bnx2x_gunzip_end(bp);
5984
5985 return rc;
5986}
5987
c14423fe 5988/* send the MCP a request, block until there is a reply */
5989static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5990{
34f80b04 5991 int func = BP_FUNC(bp);
5992 u32 seq = ++bp->fw_seq;
5993 u32 rc = 0;
5994 u32 cnt = 1;
5995 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5996
34f80b04 5997 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5998 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5999
6000 do {
 6001 /* let the FW do its magic ... */
6002 msleep(delay);
a2fbb9ea 6003
19680c48 6004 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6005
 6006 /* Give the FW up to 2 seconds (200*10ms) */
6007 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6008
6009 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6010 cnt*delay, rc, seq);
6011
6012 /* is this a reply to our command? */
6013 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6014 rc &= FW_MSG_CODE_MASK;
f1410647 6015
6016 } else {
6017 /* FW BUG! */
6018 BNX2X_ERR("FW failed to respond!\n");
6019 bnx2x_fw_dump(bp);
6020 rc = 0;
6021 }
f1410647 6022
6023 return rc;
6024}
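
/* Illustrative sketch (not part of the driver): the mailbox handshake above
 * multiplexes a command code and a small sequence counter in one 32-bit
 * word -- the driver writes (command | seq) and polls until the FW-side
 * mailbox echoes the same seq back; only then are the code bits valid.
 * The check, reduced to its shape (hypothetical 16/16 bit split; the real
 * FW_MSG_*_MASK values live in the HSI headers):
 */
#if 0
#define SKETCH_SEQ_MASK		0x0000ffff
#define SKETCH_CODE_MASK	0xffff0000

static u32 sketch_mbox_reply_code(u32 sent_seq, u32 reply)
{
	if ((reply & SKETCH_SEQ_MASK) != sent_seq)
		return 0;			/* stale or missing reply */
	return reply & SKETCH_CODE_MASK;	/* the command result */
}
#endif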
6025
6026static void bnx2x_free_mem(struct bnx2x *bp)
6027{
6028
6029#define BNX2X_PCI_FREE(x, y, size) \
6030 do { \
6031 if (x) { \
6032 pci_free_consistent(bp->pdev, size, x, y); \
6033 x = NULL; \
6034 y = 0; \
6035 } \
6036 } while (0)
6037
6038#define BNX2X_FREE(x) \
6039 do { \
6040 if (x) { \
6041 vfree(x); \
6042 x = NULL; \
6043 } \
6044 } while (0)
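/* Illustrative note: both helpers use the do { ... } while (0) wrapper so a
 * multi-statement macro behaves as one statement (e.g. it stays correct as
 * the body of an un-braced if/else), and both reset the pointer (and
 * mapping) after freeing so a second invocation is a harmless no-op.
 */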
6045
6046 int i;
6047
6048 /* fastpath */
555f6c78 6049 /* Common */
6050 for_each_queue(bp, i) {
6051
555f6c78 6052 /* status blocks */
6053 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6054 bnx2x_fp(bp, i, status_blk_mapping),
6055 sizeof(struct host_status_block) +
6056 sizeof(struct eth_tx_db_data));
6057 }
6058 /* Rx */
6059 for_each_rx_queue(bp, i) {
a2fbb9ea 6060
555f6c78 6061 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6062 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6063 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6064 bnx2x_fp(bp, i, rx_desc_mapping),
6065 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6066
6067 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6068 bnx2x_fp(bp, i, rx_comp_mapping),
6069 sizeof(struct eth_fast_path_rx_cqe) *
6070 NUM_RCQ_BD);
a2fbb9ea 6071
7a9b2557 6072 /* SGE ring */
32626230 6073 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6074 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6075 bnx2x_fp(bp, i, rx_sge_mapping),
6076 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6077 }
6078 /* Tx */
6079 for_each_tx_queue(bp, i) {
6080
6081 /* fastpath tx rings: tx_buf tx_desc */
6082 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6083 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6084 bnx2x_fp(bp, i, tx_desc_mapping),
6085 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6086 }
6087 /* end of fastpath */
6088
6089 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6090 sizeof(struct host_def_status_block));
6091
6092 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6093 sizeof(struct bnx2x_slowpath));
6094
6095#ifdef BCM_ISCSI
6096 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6097 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6098 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6099 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6100#endif
7a9b2557 6101 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6102
6103#undef BNX2X_PCI_FREE
 6104#undef BNX2X_FREE
6105}
6106
6107static int bnx2x_alloc_mem(struct bnx2x *bp)
6108{
6109
6110#define BNX2X_PCI_ALLOC(x, y, size) \
6111 do { \
6112 x = pci_alloc_consistent(bp->pdev, size, y); \
6113 if (x == NULL) \
6114 goto alloc_mem_err; \
6115 memset(x, 0, size); \
6116 } while (0)
6117
6118#define BNX2X_ALLOC(x, size) \
6119 do { \
6120 x = vmalloc(size); \
6121 if (x == NULL) \
6122 goto alloc_mem_err; \
6123 memset(x, 0, size); \
6124 } while (0)
6125
6126 int i;
6127
6128 /* fastpath */
555f6c78 6129 /* Common */
6130 for_each_queue(bp, i) {
6131 bnx2x_fp(bp, i, bp) = bp;
6132
555f6c78 6133 /* status blocks */
6134 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6135 &bnx2x_fp(bp, i, status_blk_mapping),
6136 sizeof(struct host_status_block) +
6137 sizeof(struct eth_tx_db_data));
6138 }
6139 /* Rx */
6140 for_each_rx_queue(bp, i) {
a2fbb9ea 6141
555f6c78 6142 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6143 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6144 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6145 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6146 &bnx2x_fp(bp, i, rx_desc_mapping),
6147 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6148
6149 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6150 &bnx2x_fp(bp, i, rx_comp_mapping),
6151 sizeof(struct eth_fast_path_rx_cqe) *
6152 NUM_RCQ_BD);
6153
6154 /* SGE ring */
6155 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6156 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6157 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6158 &bnx2x_fp(bp, i, rx_sge_mapping),
6159 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6160 }
6161 /* Tx */
6162 for_each_tx_queue(bp, i) {
6163
6164 bnx2x_fp(bp, i, hw_tx_prods) =
6165 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6166
6167 bnx2x_fp(bp, i, tx_prods_mapping) =
6168 bnx2x_fp(bp, i, status_blk_mapping) +
6169 sizeof(struct host_status_block);
6170
6171 /* fastpath tx rings: tx_buf tx_desc */
6172 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6173 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6174 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6175 &bnx2x_fp(bp, i, tx_desc_mapping),
6176 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6177 }
6178 /* end of fastpath */
6179
6180 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6181 sizeof(struct host_def_status_block));
6182
6183 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6184 sizeof(struct bnx2x_slowpath));
6185
6186#ifdef BCM_ISCSI
6187 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6188
6189 /* Initialize T1 */
6190 for (i = 0; i < 64*1024; i += 64) {
6191 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6192 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6193 }
6194
 6195 /* allocate the searcher T2 table;
 6196 we allocate 1/4 of the alloc num for T2
 6197 (which is not entered into the ILT) */
6198 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6199
6200 /* Initialize T2 */
6201 for (i = 0; i < 16*1024; i += 64)
6202 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6203
c14423fe 6204 /* now fixup the last line in the block to point to the next block */
6205 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6206
6207 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6208 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6209
6210 /* QM queues (128*MAX_CONN) */
6211 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6212#endif
6213
6214 /* Slow path ring */
6215 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6216
6217 return 0;
6218
6219alloc_mem_err:
6220 bnx2x_free_mem(bp);
6221 return -ENOMEM;
6222
6223#undef BNX2X_PCI_ALLOC
6224#undef BNX2X_ALLOC
6225}
6226
6227static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6228{
6229 int i;
6230
555f6c78 6231 for_each_tx_queue(bp, i) {
6232 struct bnx2x_fastpath *fp = &bp->fp[i];
6233
6234 u16 bd_cons = fp->tx_bd_cons;
6235 u16 sw_prod = fp->tx_pkt_prod;
6236 u16 sw_cons = fp->tx_pkt_cons;
6237
6238 while (sw_cons != sw_prod) {
6239 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6240 sw_cons++;
6241 }
6242 }
6243}
6244
6245static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6246{
6247 int i, j;
6248
555f6c78 6249 for_each_rx_queue(bp, j) {
6250 struct bnx2x_fastpath *fp = &bp->fp[j];
6251
6252 for (i = 0; i < NUM_RX_BD; i++) {
6253 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6254 struct sk_buff *skb = rx_buf->skb;
6255
6256 if (skb == NULL)
6257 continue;
6258
6259 pci_unmap_single(bp->pdev,
6260 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6261 bp->rx_buf_size,
6262 PCI_DMA_FROMDEVICE);
6263
6264 rx_buf->skb = NULL;
6265 dev_kfree_skb(skb);
6266 }
7a9b2557 6267 if (!fp->disable_tpa)
6268 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6269 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6270 ETH_MAX_AGGREGATION_QUEUES_E1H);
6271 }
6272}
6273
6274static void bnx2x_free_skbs(struct bnx2x *bp)
6275{
6276 bnx2x_free_tx_skbs(bp);
6277 bnx2x_free_rx_skbs(bp);
6278}
6279
6280static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6281{
34f80b04 6282 int i, offset = 1;
6283
6284 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6285 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6286 bp->msix_table[0].vector);
6287
6288 for_each_queue(bp, i) {
c14423fe 6289 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6290 "state %x\n", i, bp->msix_table[i + offset].vector,
6291 bnx2x_fp(bp, i, state));
6292
34f80b04 6293 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6294 }
6295}
6296
6297static void bnx2x_free_irq(struct bnx2x *bp)
6298{
a2fbb9ea 6299 if (bp->flags & USING_MSIX_FLAG) {
6300 bnx2x_free_msix_irqs(bp);
6301 pci_disable_msix(bp->pdev);
6302 bp->flags &= ~USING_MSIX_FLAG;
6303
6304 } else if (bp->flags & USING_MSI_FLAG) {
6305 free_irq(bp->pdev->irq, bp->dev);
6306 pci_disable_msi(bp->pdev);
6307 bp->flags &= ~USING_MSI_FLAG;
6308
6309 } else
6310 free_irq(bp->pdev->irq, bp->dev);
6311}
6312
6313static int bnx2x_enable_msix(struct bnx2x *bp)
6314{
6315 int i, rc, offset = 1;
6316 int igu_vec = 0;
a2fbb9ea 6317
6318 bp->msix_table[0].entry = igu_vec;
6319 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6320
34f80b04 6321 for_each_queue(bp, i) {
8badd27a 6322 igu_vec = BP_L_ID(bp) + offset + i;
6323 bp->msix_table[i + offset].entry = igu_vec;
6324 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6325 "(fastpath #%u)\n", i + offset, igu_vec, i);
6326 }
6327
34f80b04 6328 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6329 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6330 if (rc) {
6331 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6332 return rc;
34f80b04 6333 }
8badd27a 6334
6335 bp->flags |= USING_MSIX_FLAG;
6336
6337 return 0;
6338}
6339
6340static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6341{
34f80b04 6342 int i, rc, offset = 1;
a2fbb9ea 6343
6344 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6345 bp->dev->name, bp->dev);
6346 if (rc) {
6347 BNX2X_ERR("request sp irq failed\n");
6348 return -EBUSY;
6349 }
6350
6351 for_each_queue(bp, i) {
6352 struct bnx2x_fastpath *fp = &bp->fp[i];
6353
6354 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6355 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6356 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6357 if (rc) {
555f6c78 6358 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6359 bnx2x_free_msix_irqs(bp);
6360 return -EBUSY;
6361 }
6362
555f6c78 6363 fp->state = BNX2X_FP_STATE_IRQ;
6364 }
6365
6366 i = BNX2X_NUM_QUEUES(bp);
6367 if (is_multi(bp))
6368 printk(KERN_INFO PFX
6369 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6370 bp->dev->name, bp->msix_table[0].vector,
6371 bp->msix_table[offset].vector,
6372 bp->msix_table[offset + i - 1].vector);
6373 else
6374 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6375 bp->dev->name, bp->msix_table[0].vector,
6376 bp->msix_table[offset + i - 1].vector);
6377
a2fbb9ea 6378 return 0;
6379}
6380
6381static int bnx2x_enable_msi(struct bnx2x *bp)
6382{
6383 int rc;
6384
6385 rc = pci_enable_msi(bp->pdev);
6386 if (rc) {
6387 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6388 return -1;
6389 }
6390 bp->flags |= USING_MSI_FLAG;
6391
6392 return 0;
6393}
6394
6395static int bnx2x_req_irq(struct bnx2x *bp)
6396{
8badd27a 6397 unsigned long flags;
34f80b04 6398 int rc;
a2fbb9ea 6399
6400 if (bp->flags & USING_MSI_FLAG)
6401 flags = 0;
6402 else
6403 flags = IRQF_SHARED;
6404
6405 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6406 bp->dev->name, bp->dev);
6407 if (!rc)
6408 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6409
6410 return rc;
6411}
6412
6413static void bnx2x_napi_enable(struct bnx2x *bp)
6414{
6415 int i;
6416
555f6c78 6417 for_each_rx_queue(bp, i)
6418 napi_enable(&bnx2x_fp(bp, i, napi));
6419}
6420
6421static void bnx2x_napi_disable(struct bnx2x *bp)
6422{
6423 int i;
6424
555f6c78 6425 for_each_rx_queue(bp, i)
65abd74d
YG
6426 napi_disable(&bnx2x_fp(bp, i, napi));
6427}
6428
6429static void bnx2x_netif_start(struct bnx2x *bp)
6430{
6431 if (atomic_dec_and_test(&bp->intr_sem)) {
6432 if (netif_running(bp->dev)) {
65abd74d
YG
6433 bnx2x_napi_enable(bp);
6434 bnx2x_int_enable(bp);
6435 if (bp->state == BNX2X_STATE_OPEN)
6436 netif_tx_wake_all_queues(bp->dev);
6437 }
6438 }
6439}
6440
f8ef6e44 6441static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6442{
f8ef6e44 6443 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6444 bnx2x_napi_disable(bp);
65abd74d 6445 if (netif_running(bp->dev)) {
6446 netif_tx_disable(bp->dev);
6447 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6448 }
6449}
6450
6451/*
6452 * Init service functions
6453 */
6454
3101c2bc 6455static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6456{
6457 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6458 int port = BP_PORT(bp);
6459
6460 /* CAM allocation
6461 * unicasts 0-31:port0 32-63:port1
6462 * multicast 64-127:port0 128-191:port1
6463 */
8d9c5f34 6464 config->hdr.length = 2;
af246401 6465 config->hdr.offset = port ? 32 : 0;
34f80b04 6466 config->hdr.client_id = BP_CL_ID(bp);
6467 config->hdr.reserved1 = 0;
6468
6469 /* primary MAC */
6470 config->config_table[0].cam_entry.msb_mac_addr =
6471 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6472 config->config_table[0].cam_entry.middle_mac_addr =
6473 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6474 config->config_table[0].cam_entry.lsb_mac_addr =
6475 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6476 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6477 if (set)
6478 config->config_table[0].target_table_entry.flags = 0;
6479 else
6480 CAM_INVALIDATE(config->config_table[0]);
6481 config->config_table[0].target_table_entry.client_id = 0;
6482 config->config_table[0].target_table_entry.vlan_id = 0;
6483
6484 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6485 (set ? "setting" : "clearing"),
6486 config->config_table[0].cam_entry.msb_mac_addr,
6487 config->config_table[0].cam_entry.middle_mac_addr,
6488 config->config_table[0].cam_entry.lsb_mac_addr);
6489
6490 /* broadcast */
6491 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6492 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6493 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6494 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6495 if (set)
6496 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6497 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6498 else
6499 CAM_INVALIDATE(config->config_table[1]);
6500 config->config_table[1].target_table_entry.client_id = 0;
6501 config->config_table[1].target_table_entry.vlan_id = 0;
6502
6503 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6504 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6505 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6506}
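
/* Worked example (illustrative sketch): the CAM wants the MAC as three
 * 16-bit words with the bytes in wire order. For 00:11:22:33:44:55 on a
 * little-endian host, *(u16 *)&dev_addr[0] reads 0x1100 and swab16()
 * turns it into 0x0011; likewise middle = 0x2233 and lsb = 0x4455
 * (compiled-out illustration):
 */
#if 0
static void cam_mac_example(void)
{
	u8 addr[ETH_ALEN] = { 0x00, 0x11, 0x22, 0x33, 0x44, 0x55 };
	u16 msb = swab16(*(u16 *)&addr[0]);	/* 0x0011 */
	u16 mid = swab16(*(u16 *)&addr[2]);	/* 0x2233 */
	u16 lsb = swab16(*(u16 *)&addr[4]);	/* 0x4455 */
}
#endif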
6507
3101c2bc 6508static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6509{
6510 struct mac_configuration_cmd_e1h *config =
6511 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6512
3101c2bc 6513 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6514 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6515 return;
6516 }
6517
6518 /* CAM allocation for E1H
6519 * unicasts: by func number
6520 * multicast: 20+FUNC*20, 20 each
6521 */
8d9c5f34 6522 config->hdr.length = 1;
6523 config->hdr.offset = BP_FUNC(bp);
6524 config->hdr.client_id = BP_CL_ID(bp);
6525 config->hdr.reserved1 = 0;
6526
6527 /* primary MAC */
6528 config->config_table[0].msb_mac_addr =
6529 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6530 config->config_table[0].middle_mac_addr =
6531 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6532 config->config_table[0].lsb_mac_addr =
6533 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6534 config->config_table[0].client_id = BP_L_ID(bp);
6535 config->config_table[0].vlan_id = 0;
6536 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6537 if (set)
6538 config->config_table[0].flags = BP_PORT(bp);
6539 else
6540 config->config_table[0].flags =
6541 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6542
6543 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6544 (set ? "setting" : "clearing"),
6545 config->config_table[0].msb_mac_addr,
6546 config->config_table[0].middle_mac_addr,
6547 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6548
6549 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6550 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6551 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6552}
6553
6554static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6555 int *state_p, int poll)
6556{
6557 /* can take a while if any port is running */
34f80b04 6558 int cnt = 500;
a2fbb9ea 6559
6560 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6561 poll ? "polling" : "waiting", state, idx);
6562
6563 might_sleep();
34f80b04 6564 while (cnt--) {
6565 if (poll) {
6566 bnx2x_rx_int(bp->fp, 10);
6567 /* if index is different from 0
6568 * the reply for some commands will
3101c2bc 6569 * be on the non-default queue
6570 */
6571 if (idx)
6572 bnx2x_rx_int(&bp->fp[idx], 10);
6573 }
a2fbb9ea 6574
3101c2bc 6575 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6576 if (*state_p == state)
6577 return 0;
6578
a2fbb9ea 6579 msleep(1);
6580 }
6581
a2fbb9ea 6582 /* timeout! */
6583 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6584 poll ? "polling" : "waiting", state, idx);
6585#ifdef BNX2X_STOP_ON_ERROR
6586 bnx2x_panic();
6587#endif
a2fbb9ea 6588
49d66772 6589 return -EBUSY;
6590}
6591
6592static int bnx2x_setup_leading(struct bnx2x *bp)
6593{
34f80b04 6594 int rc;
a2fbb9ea 6595
c14423fe 6596 /* reset IGU state */
34f80b04 6597 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6598
6599 /* SETUP ramrod */
6600 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6601
6602 /* Wait for completion */
6603 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6604
34f80b04 6605 return rc;
6606}
6607
6608static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6609{
6610 struct bnx2x_fastpath *fp = &bp->fp[index];
6611
a2fbb9ea 6612 /* reset IGU state */
555f6c78 6613 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6614
228241eb 6615 /* SETUP ramrod */
6616 fp->state = BNX2X_FP_STATE_OPENING;
6617 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6618 fp->cl_id, 0);
6619
6620 /* Wait for completion */
6621 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6622 &(fp->state), 0);
6623}
6624
a2fbb9ea 6625static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6626
8badd27a 6627static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6628{
555f6c78 6629 int num_queues;
a2fbb9ea 6630
6631 switch (int_mode) {
6632 case INT_MODE_INTx:
6633 case INT_MODE_MSI:
6634 num_queues = 1;
6635 bp->num_rx_queues = num_queues;
6636 bp->num_tx_queues = num_queues;
6637 DP(NETIF_MSG_IFUP,
6638 "set number of queues to %d\n", num_queues);
6639 break;
6640
6641 case INT_MODE_MSIX:
6642 default:
6643 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6644 num_queues = min_t(u32, num_online_cpus(),
6645 BNX2X_MAX_QUEUES(bp));
34f80b04 6646 else
6647 num_queues = 1;
6648 bp->num_rx_queues = num_queues;
6649 bp->num_tx_queues = num_queues;
6650 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6651 " number of tx queues to %d\n",
6652 bp->num_rx_queues, bp->num_tx_queues);
6653 /* if we can't use MSI-X we only need one fp,
6654 * so try to enable MSI-X with the requested number of fp's
6655 * and fallback to MSI or legacy INTx with one fp
6656 */
8badd27a 6657 if (bnx2x_enable_msix(bp)) {
34f80b04 6658 /* failed to enable MSI-X */
6659 num_queues = 1;
6660 bp->num_rx_queues = num_queues;
6661 bp->num_tx_queues = num_queues;
6662 if (bp->multi_mode)
6663 BNX2X_ERR("Multi requested but failed to "
 6664 "enable MSI-X; set number of "
6665 "queues to %d\n", num_queues);
a2fbb9ea 6666 }
8badd27a 6667 break;
a2fbb9ea 6668 }
555f6c78 6669 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a
EG
6670}
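/* Summary of the selection above: int_mode 1 (INT#x) or 2 (MSI) forces a
 * single rx/tx queue pair; otherwise MSI-X is attempted with
 * min(num_online_cpus(), BNX2X_MAX_QUEUES(bp)) queues (when multi_mode is
 * ETH_RSS_MODE_REGULAR), falling back to one queue if MSI-X cannot be
 * enabled.
 */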
6671
6672static void bnx2x_set_rx_mode(struct net_device *dev);
6673
6674/* must be called with rtnl_lock */
6675static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6676{
6677 u32 load_code;
6678 int i, rc = 0;
6679#ifdef BNX2X_STOP_ON_ERROR
6680 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6681 if (unlikely(bp->panic))
6682 return -EPERM;
6683#endif
6684
6685 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6686
6687 bnx2x_set_int_mode(bp);
c14423fe 6688
a2fbb9ea
ET
6689 if (bnx2x_alloc_mem(bp))
6690 return -ENOMEM;
6691
555f6c78 6692 for_each_rx_queue(bp, i)
7a9b2557
VZ
6693 bnx2x_fp(bp, i, disable_tpa) =
6694 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6695
555f6c78 6696 for_each_rx_queue(bp, i)
2dfe0e1f
EG
6697 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6698 bnx2x_poll, 128);
6699
6700#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6701 for_each_rx_queue(bp, i) {
2dfe0e1f
EG
6702 struct bnx2x_fastpath *fp = &bp->fp[i];
6703
6704 fp->poll_no_work = 0;
6705 fp->poll_calls = 0;
6706 fp->poll_max_calls = 0;
6707 fp->poll_complete = 0;
6708 fp->poll_exit = 0;
6709 }
6710#endif
6711 bnx2x_napi_enable(bp);
6712
34f80b04
EG
6713 if (bp->flags & USING_MSIX_FLAG) {
6714 rc = bnx2x_req_msix_irqs(bp);
6715 if (rc) {
6716 pci_disable_msix(bp->pdev);
2dfe0e1f 6717 goto load_error1;
34f80b04
EG
6718 }
6719 } else {
8badd27a
EG
6720 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6721 bnx2x_enable_msi(bp);
34f80b04
EG
6722 bnx2x_ack_int(bp);
6723 rc = bnx2x_req_irq(bp);
6724 if (rc) {
2dfe0e1f 6725 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6726 if (bp->flags & USING_MSI_FLAG)
6727 pci_disable_msi(bp->pdev);
2dfe0e1f 6728 goto load_error1;
a2fbb9ea 6729 }
8badd27a
EG
6730 if (bp->flags & USING_MSI_FLAG) {
6731 bp->dev->irq = bp->pdev->irq;
6732 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6733 bp->dev->name, bp->pdev->irq);
6734 }
a2fbb9ea
ET
6735 }
6736
2dfe0e1f
EG
6737 /* Send LOAD_REQUEST command to MCP.
6738 The MCP response indicates the type of LOAD command:
6739 if this is the first port to be initialized,
6740 the common blocks should be initialized as well; otherwise not
6741 */
6742 if (!BP_NOMCP(bp)) {
6743 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6744 if (!load_code) {
6745 BNX2X_ERR("MCP response failure, aborting\n");
6746 rc = -EBUSY;
6747 goto load_error2;
6748 }
6749 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6750 rc = -EBUSY; /* other port in diagnostic mode */
6751 goto load_error2;
6752 }
6753
6754 } else {
6755 int port = BP_PORT(bp);
6756
6757 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6758 load_count[0], load_count[1], load_count[2]);
6759 load_count[0]++;
6760 load_count[1 + port]++;
6761 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6762 load_count[0], load_count[1], load_count[2]);
6763 if (load_count[0] == 1)
6764 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6765 else if (load_count[1 + port] == 1)
6766 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6767 else
6768 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6769 }
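/* Illustrative NO-MCP walk-through: starting from load counts {0, 0, 0},
 * the first function loaded on port 0 yields {1, 1, 0} -> LOAD_COMMON, a
 * second function on port 1 yields {2, 1, 1} -> LOAD_PORT, and any
 * further function on either port gets LOAD_FUNCTION.
 */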
6770
6771 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6772 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6773 bp->port.pmf = 1;
6774 else
6775 bp->port.pmf = 0;
6776 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6777
a2fbb9ea 6778 /* Initialize HW */
34f80b04
EG
6779 rc = bnx2x_init_hw(bp, load_code);
6780 if (rc) {
a2fbb9ea 6781 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6782 goto load_error2;
a2fbb9ea
ET
6783 }
6784
a2fbb9ea 6785 /* Setup NIC internals and enable interrupts */
471de716 6786 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6787
6788 /* Send LOAD_DONE command to MCP */
34f80b04 6789 if (!BP_NOMCP(bp)) {
228241eb
ET
6790 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6791 if (!load_code) {
da5a662a 6792 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6793 rc = -EBUSY;
2dfe0e1f 6794 goto load_error3;
a2fbb9ea
ET
6795 }
6796 }
6797
6798 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6799
34f80b04
EG
6800 rc = bnx2x_setup_leading(bp);
6801 if (rc) {
da5a662a 6802 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6803 goto load_error3;
34f80b04 6804 }
a2fbb9ea 6805
34f80b04
EG
6806 if (CHIP_IS_E1H(bp))
6807 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6808 BNX2X_ERR("!!! mf_cfg function disabled\n");
6809 bp->state = BNX2X_STATE_DISABLED;
6810 }
a2fbb9ea 6811
34f80b04
EG
6812 if (bp->state == BNX2X_STATE_OPEN)
6813 for_each_nondefault_queue(bp, i) {
6814 rc = bnx2x_setup_multi(bp, i);
6815 if (rc)
2dfe0e1f 6816 goto load_error3;
34f80b04 6817 }
a2fbb9ea 6818
34f80b04 6819 if (CHIP_IS_E1(bp))
3101c2bc 6820 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6821 else
3101c2bc 6822 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6823
6824 if (bp->port.pmf)
6825 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6826
6827 /* Start fast path */
34f80b04
EG
6828 switch (load_mode) {
6829 case LOAD_NORMAL:
6830 /* Tx queue should only be re-enabled */
555f6c78 6831 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6832 /* Initialize the receive filter. */
34f80b04
EG
6833 bnx2x_set_rx_mode(bp->dev);
6834 break;
6835
6836 case LOAD_OPEN:
555f6c78 6837 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6838 /* Initialize the receive filter. */
34f80b04 6839 bnx2x_set_rx_mode(bp->dev);
34f80b04 6840 break;
a2fbb9ea 6841
34f80b04 6842 case LOAD_DIAG:
2dfe0e1f 6843 /* Initialize the receive filter. */
a2fbb9ea 6844 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6845 bp->state = BNX2X_STATE_DIAG;
6846 break;
6847
6848 default:
6849 break;
a2fbb9ea
ET
6850 }
6851
34f80b04
EG
6852 if (!bp->port.pmf)
6853 bnx2x__link_status_update(bp);
6854
a2fbb9ea
ET
6855 /* start the timer */
6856 mod_timer(&bp->timer, jiffies + bp->current_interval);
6857
34f80b04 6858
a2fbb9ea
ET
6859 return 0;
6860
2dfe0e1f
EG
6861load_error3:
6862 bnx2x_int_disable_sync(bp, 1);
6863 if (!BP_NOMCP(bp)) {
6864 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6865 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6866 }
6867 bp->port.pmf = 0;
7a9b2557
VZ
6868 /* Free SKBs, SGEs, TPA pool and driver internals */
6869 bnx2x_free_skbs(bp);
555f6c78 6870 for_each_rx_queue(bp, i)
3196a88a 6871 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6872load_error2:
d1014634
YG
6873 /* Release IRQs */
6874 bnx2x_free_irq(bp);
2dfe0e1f
EG
6875load_error1:
6876 bnx2x_napi_disable(bp);
555f6c78 6877 for_each_rx_queue(bp, i)
7cde1c8b 6878 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6879 bnx2x_free_mem(bp);
6880
6881 /* TBD we really need to reset the chip
6882 if we want to recover from this */
34f80b04 6883 return rc;
a2fbb9ea
ET
6884}
6885
6886static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6887{
555f6c78 6888 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
6889 int rc;
6890
c14423fe 6891 /* halt the connection */
555f6c78
EG
6892 fp->state = BNX2X_FP_STATE_HALTING;
6893 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 6894
34f80b04 6895 /* Wait for completion */
a2fbb9ea 6896 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 6897 &(fp->state), 1);
c14423fe 6898 if (rc) /* timeout */
a2fbb9ea
ET
6899 return rc;
6900
6901 /* delete cfc entry */
6902 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6903
34f80b04
EG
6904 /* Wait for completion */
6905 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 6906 &(fp->state), 1);
34f80b04 6907 return rc;
a2fbb9ea
ET
6908}
6909
da5a662a 6910static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6911{
49d66772 6912 u16 dsb_sp_prod_idx;
c14423fe 6913 /* if the other port is handling traffic,
a2fbb9ea 6914 this can take a lot of time */
34f80b04
EG
6915 int cnt = 500;
6916 int rc;
a2fbb9ea
ET
6917
6918 might_sleep();
6919
6920 /* Send HALT ramrod */
6921 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6922 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6923
34f80b04
EG
6924 /* Wait for completion */
6925 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6926 &(bp->fp[0].state), 1);
6927 if (rc) /* timeout */
da5a662a 6928 return rc;
a2fbb9ea 6929
49d66772 6930 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6931
228241eb 6932 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6933 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6934
49d66772 6935 /* Wait for the completion to arrive on the default status block;
a2fbb9ea
ET
6936 we are going to reset the chip anyway,
6937 so there is not much to do if this times out
6938 */
34f80b04 6939 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6940 if (!cnt) {
6941 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6942 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6943 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6944#ifdef BNX2X_STOP_ON_ERROR
6945 bnx2x_panic();
da5a662a
VZ
6946#else
6947 rc = -EBUSY;
34f80b04
EG
6948#endif
6949 break;
6950 }
6951 cnt--;
da5a662a 6952 msleep(1);
5650d9d4 6953 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
6954 }
6955 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6956 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6957
6958 return rc;
a2fbb9ea
ET
6959}
6960
34f80b04
EG
6961static void bnx2x_reset_func(struct bnx2x *bp)
6962{
6963 int port = BP_PORT(bp);
6964 int func = BP_FUNC(bp);
6965 int base, i;
6966
6967 /* Configure IGU */
6968 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6969 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6970
34f80b04
EG
6971 /* Clear ILT */
6972 base = FUNC_ILT_BASE(func);
6973 for (i = base; i < base + ILT_PER_FUNC; i++)
6974 bnx2x_ilt_wr(bp, i, 0);
6975}
6976
6977static void bnx2x_reset_port(struct bnx2x *bp)
6978{
6979 int port = BP_PORT(bp);
6980 u32 val;
6981
6982 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6983
6984 /* Do not rcv packets to BRB */
6985 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6986 /* Do not direct rcv packets that are not for MCP to the BRB */
6987 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6988 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6989
6990 /* Configure AEU */
6991 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6992
6993 msleep(100);
6994 /* Check for BRB port occupancy */
6995 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6996 if (val)
6997 DP(NETIF_MSG_IFDOWN,
33471629 6998 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6999
7000 /* TODO: Close Doorbell port? */
7001}
7002
34f80b04
EG
7003static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7004{
7005 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7006 BP_FUNC(bp), reset_code);
7007
7008 switch (reset_code) {
7009 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7010 bnx2x_reset_port(bp);
7011 bnx2x_reset_func(bp);
7012 bnx2x_reset_common(bp);
7013 break;
7014
7015 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7016 bnx2x_reset_port(bp);
7017 bnx2x_reset_func(bp);
7018 break;
7019
7020 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7021 bnx2x_reset_func(bp);
7022 break;
49d66772 7023
34f80b04
EG
7024 default:
7025 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7026 break;
7027 }
7028}
7029
33471629 7030/* must be called with rtnl_lock */
34f80b04 7031static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7032{
da5a662a 7033 int port = BP_PORT(bp);
a2fbb9ea 7034 u32 reset_code = 0;
da5a662a 7035 int i, cnt, rc;
a2fbb9ea
ET
7036
7037 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7038
228241eb
ET
7039 bp->rx_mode = BNX2X_RX_MODE_NONE;
7040 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7041
f8ef6e44 7042 bnx2x_netif_stop(bp, 1);
e94d8af3 7043
34f80b04
EG
7044 del_timer_sync(&bp->timer);
7045 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7046 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7047 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7048
70b9986c
EG
7049 /* Release IRQs */
7050 bnx2x_free_irq(bp);
7051
555f6c78
EG
7052 /* Wait until tx fastpath tasks complete */
7053 for_each_tx_queue(bp, i) {
228241eb
ET
7054 struct bnx2x_fastpath *fp = &bp->fp[i];
7055
34f80b04
EG
7056 cnt = 1000;
7057 smp_rmb();
e8b5fc51 7058 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7059
65abd74d 7060 bnx2x_tx_int(fp, 1000);
34f80b04
EG
7061 if (!cnt) {
7062 BNX2X_ERR("timeout waiting for queue[%d]\n",
7063 i);
7064#ifdef BNX2X_STOP_ON_ERROR
7065 bnx2x_panic();
7066 return -EBUSY;
7067#else
7068 break;
7069#endif
7070 }
7071 cnt--;
da5a662a 7072 msleep(1);
34f80b04
EG
7073 smp_rmb();
7074 }
228241eb 7075 }
da5a662a
VZ
7076 /* Give HW time to discard old tx messages */
7077 msleep(1);
a2fbb9ea 7078
3101c2bc
YG
7079 if (CHIP_IS_E1(bp)) {
7080 struct mac_configuration_cmd *config =
7081 bnx2x_sp(bp, mcast_config);
7082
7083 bnx2x_set_mac_addr_e1(bp, 0);
7084
8d9c5f34 7085 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7086 CAM_INVALIDATE(config->config_table[i]);
7087
8d9c5f34 7088 config->hdr.length = i;
3101c2bc
YG
7089 if (CHIP_REV_IS_SLOW(bp))
7090 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7091 else
7092 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7093 config->hdr.client_id = BP_CL_ID(bp);
7094 config->hdr.reserved1 = 0;
7095
7096 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7097 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7098 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7099
7100 } else { /* E1H */
65abd74d
YG
7101 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7102
3101c2bc
YG
7103 bnx2x_set_mac_addr_e1h(bp, 0);
7104
7105 for (i = 0; i < MC_HASH_SIZE; i++)
7106 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7107 }
7108
65abd74d
YG
7109 if (unload_mode == UNLOAD_NORMAL)
7110 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7111
7112 else if (bp->flags & NO_WOL_FLAG) {
7113 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7114 if (CHIP_IS_E1H(bp))
7115 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7116
7117 } else if (bp->wol) {
7118 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7119 u8 *mac_addr = bp->dev->dev_addr;
7120 u32 val;
7121 /* The MAC address is written to entries 1-4 to
7122 preserve entry 0, which is used by the PMF */
7123 u8 entry = (BP_E1HVN(bp) + 1)*8;
7124
7125 val = (mac_addr[0] << 8) | mac_addr[1];
7126 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7127
7128 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7129 (mac_addr[4] << 8) | mac_addr[5];
7130 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7131
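/* Illustrative example: for E1HVN 0 the entry offset is 8 (entry 1), and
 * a MAC of 00:1a:2b:3c:4d:5e is written as 0x001a to MAC_MATCH + 8 and
 * 0x2b3c4d5e to MAC_MATCH + 12.
 */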
7132 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7133
7134 } else
7135 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7136
34f80b04
EG
7137 /* Close multi and leading connections
7138 Completions for ramrods are collected synchronously */
a2fbb9ea
ET
7139 for_each_nondefault_queue(bp, i)
7140 if (bnx2x_stop_multi(bp, i))
228241eb 7141 goto unload_error;
a2fbb9ea 7142
da5a662a
VZ
7143 rc = bnx2x_stop_leading(bp);
7144 if (rc) {
34f80b04 7145 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7146#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7147 return -EBUSY;
da5a662a
VZ
7148#else
7149 goto unload_error;
34f80b04 7150#endif
228241eb
ET
7151 }
7152
7153unload_error:
34f80b04 7154 if (!BP_NOMCP(bp))
228241eb 7155 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7156 else {
7157 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7158 load_count[0], load_count[1], load_count[2]);
7159 load_count[0]--;
da5a662a 7160 load_count[1 + port]--;
34f80b04
EG
7161 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7162 load_count[0], load_count[1], load_count[2]);
7163 if (load_count[0] == 0)
7164 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7165 else if (load_count[1 + port] == 0)
34f80b04
EG
7166 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7167 else
7168 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7169 }
a2fbb9ea 7170
34f80b04
EG
7171 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7172 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7173 bnx2x__link_reset(bp);
a2fbb9ea
ET
7174
7175 /* Reset the chip */
228241eb 7176 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7177
7178 /* Report UNLOAD_DONE to MCP */
34f80b04 7179 if (!BP_NOMCP(bp))
a2fbb9ea 7180 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 7181 bp->port.pmf = 0;
a2fbb9ea 7182
7a9b2557 7183 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7184 bnx2x_free_skbs(bp);
555f6c78 7185 for_each_rx_queue(bp, i)
3196a88a 7186 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7187 for_each_rx_queue(bp, i)
7cde1c8b 7188 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7189 bnx2x_free_mem(bp);
7190
7191 bp->state = BNX2X_STATE_CLOSED;
228241eb 7192
a2fbb9ea
ET
7193 netif_carrier_off(bp->dev);
7194
7195 return 0;
7196}
7197
34f80b04
EG
7198static void bnx2x_reset_task(struct work_struct *work)
7199{
7200 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7201
7202#ifdef BNX2X_STOP_ON_ERROR
7203 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7204 " so reset not done to allow debug dump,\n"
7205 KERN_ERR " you will need to reboot when done\n");
7206 return;
7207#endif
7208
7209 rtnl_lock();
7210
7211 if (!netif_running(bp->dev))
7212 goto reset_task_exit;
7213
7214 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7215 bnx2x_nic_load(bp, LOAD_NORMAL);
7216
7217reset_task_exit:
7218 rtnl_unlock();
7219}
7220
a2fbb9ea
ET
7221/* end of nic load/unload */
7222
7223/* ethtool_ops */
7224
7225/*
7226 * Init service functions
7227 */
7228
f1ef27ef
EG
7229static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7230{
7231 switch (func) {
7232 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7233 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7234 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7235 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7236 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7237 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7238 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7239 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7240 default:
7241 BNX2X_ERR("Unsupported function index: %d\n", func);
7242 return (u32)(-1);
7243 }
7244}
7245
7246static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7247{
7248 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7249
7250 /* Flush all outstanding writes */
7251 mmiowb();
7252
7253 /* Pretend to be function 0 */
7254 REG_WR(bp, reg, 0);
7255 /* Flush the GRC transaction (in the chip) */
7256 new_val = REG_RD(bp, reg);
7257 if (new_val != 0) {
7258 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7259 new_val);
7260 BUG();
7261 }
7262
7263 /* From now we are in the "like-E1" mode */
7264 bnx2x_int_disable(bp);
7265
7266 /* Flush all outstanding writes */
7267 mmiowb();
7268
7269 /* Restore the original function settings */
7270 REG_WR(bp, reg, orig_func);
7271 new_val = REG_RD(bp, reg);
7272 if (new_val != orig_func) {
7273 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7274 orig_func, new_val);
7275 BUG();
7276 }
7277}
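/* Note on the pattern above: GRC writes are posted, so each write to the
 * pretend register is followed by a read-back that forces the write to
 * complete in the chip before interrupts are touched.
 */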
7278
7279static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7280{
7281 if (CHIP_IS_E1H(bp))
7282 bnx2x_undi_int_disable_e1h(bp, func);
7283 else
7284 bnx2x_int_disable(bp);
7285}
7286
34f80b04
EG
7287static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7288{
7289 u32 val;
7290
7291 /* Check if there is any driver already loaded */
7292 val = REG_RD(bp, MISC_REG_UNPREPARED);
7293 if (val == 0x1) {
7294 /* Check if it is the UNDI driver:
7295 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7296 */
4a37fb66 7297 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7298 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7299 if (val == 0x7) {
7300 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7301 /* save our func */
34f80b04 7302 int func = BP_FUNC(bp);
da5a662a
VZ
7303 u32 swap_en;
7304 u32 swap_val;
34f80b04 7305
b4661739
EG
7306 /* clear the UNDI indication */
7307 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7308
34f80b04
EG
7309 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7310
7311 /* try unload UNDI on port 0 */
7312 bp->func = 0;
da5a662a
VZ
7313 bp->fw_seq =
7314 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7315 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7316 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7317
7318 /* if UNDI is loaded on the other port */
7319 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7320
da5a662a
VZ
7321 /* send "DONE" for previous unload */
7322 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7323
7324 /* unload UNDI on port 1 */
34f80b04 7325 bp->func = 1;
da5a662a
VZ
7326 bp->fw_seq =
7327 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7328 DRV_MSG_SEQ_NUMBER_MASK);
7329 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7330
7331 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7332 }
7333
b4661739
EG
7334 /* now it's safe to release the lock */
7335 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7336
f1ef27ef 7337 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7338
7339 /* close input traffic and wait for it to drain */
7340 /* Do not rcv packets to BRB */
7341 REG_WR(bp,
7342 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7343 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7344 /* Do not direct rcv packets that are not for MCP to
7345 * the BRB */
7346 REG_WR(bp,
7347 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7348 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7349 /* clear AEU */
7350 REG_WR(bp,
7351 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7352 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7353 msleep(10);
7354
7355 /* save NIG port swap info */
7356 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7357 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7358 /* reset device */
7359 REG_WR(bp,
7360 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7361 0xd3ffffff);
34f80b04
EG
7362 REG_WR(bp,
7363 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7364 0x1403);
da5a662a
VZ
7365 /* take the NIG out of reset and restore swap values */
7366 REG_WR(bp,
7367 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7368 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7369 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7370 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7371
7372 /* send unload done to the MCP */
7373 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7374
7375 /* restore our func and fw_seq */
7376 bp->func = func;
7377 bp->fw_seq =
7378 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7379 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7380
7381 } else
7382 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7383 }
7384}
7385
7386static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7387{
7388 u32 val, val2, val3, val4, id;
72ce58c3 7389 u16 pmc;
34f80b04
EG
7390
7391 /* Get the chip revision id and number. */
7392 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7393 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7394 id = ((val & 0xffff) << 16);
7395 val = REG_RD(bp, MISC_REG_CHIP_REV);
7396 id |= ((val & 0xf) << 12);
7397 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7398 id |= ((val & 0xff) << 4);
5a40e08e 7399 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7400 id |= (val & 0xf);
7401 bp->common.chip_id = id;
7402 bp->link_params.chip_id = bp->common.chip_id;
7403 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7404
1c06328c
EG
7405 val = (REG_RD(bp, 0x2874) & 0x55);
7406 if ((bp->common.chip_id & 0x1) ||
7407 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7408 bp->flags |= ONE_PORT_FLAG;
7409 BNX2X_DEV_INFO("single port device\n");
7410 }
7411
34f80b04
EG
7412 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7413 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7414 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7415 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7416 bp->common.flash_size, bp->common.flash_size);
7417
7418 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7419 bp->link_params.shmem_base = bp->common.shmem_base;
7420 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7421
7422 if (!bp->common.shmem_base ||
7423 (bp->common.shmem_base < 0xA0000) ||
7424 (bp->common.shmem_base >= 0xC0000)) {
7425 BNX2X_DEV_INFO("MCP not active\n");
7426 bp->flags |= NO_MCP_FLAG;
7427 return;
7428 }
7429
7430 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7431 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7432 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7433 BNX2X_ERR("BAD MCP validity signature\n");
7434
7435 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7436 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7437
7438 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7439 bp->common.hw_config, bp->common.board);
7440
7441 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7442 SHARED_HW_CFG_LED_MODE_MASK) >>
7443 SHARED_HW_CFG_LED_MODE_SHIFT);
7444
7445 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7446 bp->common.bc_ver = val;
7447 BNX2X_DEV_INFO("bc_ver %X\n", val);
7448 if (val < BNX2X_BC_VER) {
7449 /* for now only warn
7450 * later we might need to enforce this */
7451 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7452 " please upgrade BC\n", BNX2X_BC_VER, val);
7453 }
72ce58c3
EG
7454
7455 if (BP_E1HVN(bp) == 0) {
7456 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7457 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7458 } else {
7459 /* no WOL capability for E1HVN != 0 */
7460 bp->flags |= NO_WOL_FLAG;
7461 }
7462 BNX2X_DEV_INFO("%sWoL capable\n",
7463 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7464
7465 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7466 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7467 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7468 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7469
7470 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7471 val, val2, val3, val4);
7472}
7473
7474static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7475 u32 switch_cfg)
a2fbb9ea 7476{
34f80b04 7477 int port = BP_PORT(bp);
a2fbb9ea
ET
7478 u32 ext_phy_type;
7479
a2fbb9ea
ET
7480 switch (switch_cfg) {
7481 case SWITCH_CFG_1G:
7482 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7483
c18487ee
YR
7484 ext_phy_type =
7485 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7486 switch (ext_phy_type) {
7487 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7488 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7489 ext_phy_type);
7490
34f80b04
EG
7491 bp->port.supported |= (SUPPORTED_10baseT_Half |
7492 SUPPORTED_10baseT_Full |
7493 SUPPORTED_100baseT_Half |
7494 SUPPORTED_100baseT_Full |
7495 SUPPORTED_1000baseT_Full |
7496 SUPPORTED_2500baseX_Full |
7497 SUPPORTED_TP |
7498 SUPPORTED_FIBRE |
7499 SUPPORTED_Autoneg |
7500 SUPPORTED_Pause |
7501 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7502 break;
7503
7504 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7505 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7506 ext_phy_type);
7507
34f80b04
EG
7508 bp->port.supported |= (SUPPORTED_10baseT_Half |
7509 SUPPORTED_10baseT_Full |
7510 SUPPORTED_100baseT_Half |
7511 SUPPORTED_100baseT_Full |
7512 SUPPORTED_1000baseT_Full |
7513 SUPPORTED_TP |
7514 SUPPORTED_FIBRE |
7515 SUPPORTED_Autoneg |
7516 SUPPORTED_Pause |
7517 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7518 break;
7519
7520 default:
7521 BNX2X_ERR("NVRAM config error. "
7522 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7523 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7524 return;
7525 }
7526
34f80b04
EG
7527 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7528 port*0x10);
7529 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7530 break;
7531
7532 case SWITCH_CFG_10G:
7533 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7534
c18487ee
YR
7535 ext_phy_type =
7536 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7537 switch (ext_phy_type) {
7538 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7539 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7540 ext_phy_type);
7541
34f80b04
EG
7542 bp->port.supported |= (SUPPORTED_10baseT_Half |
7543 SUPPORTED_10baseT_Full |
7544 SUPPORTED_100baseT_Half |
7545 SUPPORTED_100baseT_Full |
7546 SUPPORTED_1000baseT_Full |
7547 SUPPORTED_2500baseX_Full |
7548 SUPPORTED_10000baseT_Full |
7549 SUPPORTED_TP |
7550 SUPPORTED_FIBRE |
7551 SUPPORTED_Autoneg |
7552 SUPPORTED_Pause |
7553 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7554 break;
7555
7556 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7557 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7558 ext_phy_type);
f1410647 7559
34f80b04
EG
7560 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7561 SUPPORTED_FIBRE |
7562 SUPPORTED_Pause |
7563 SUPPORTED_Asym_Pause);
f1410647
ET
7564 break;
7565
a2fbb9ea 7566 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7567 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7568 ext_phy_type);
7569
34f80b04
EG
7570 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7571 SUPPORTED_1000baseT_Full |
7572 SUPPORTED_FIBRE |
7573 SUPPORTED_Pause |
7574 SUPPORTED_Asym_Pause);
f1410647
ET
7575 break;
7576
7577 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7578 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7579 ext_phy_type);
7580
34f80b04
EG
7581 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7582 SUPPORTED_1000baseT_Full |
7583 SUPPORTED_FIBRE |
7584 SUPPORTED_Autoneg |
7585 SUPPORTED_Pause |
7586 SUPPORTED_Asym_Pause);
f1410647
ET
7587 break;
7588
c18487ee
YR
7589 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7590 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7591 ext_phy_type);
7592
34f80b04
EG
7593 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7594 SUPPORTED_2500baseX_Full |
7595 SUPPORTED_1000baseT_Full |
7596 SUPPORTED_FIBRE |
7597 SUPPORTED_Autoneg |
7598 SUPPORTED_Pause |
7599 SUPPORTED_Asym_Pause);
c18487ee
YR
7600 break;
7601
f1410647
ET
7602 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7603 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7604 ext_phy_type);
7605
34f80b04
EG
7606 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7607 SUPPORTED_TP |
7608 SUPPORTED_Autoneg |
7609 SUPPORTED_Pause |
7610 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7611 break;
7612
c18487ee
YR
7613 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7614 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7615 bp->link_params.ext_phy_config);
7616 break;
7617
a2fbb9ea
ET
7618 default:
7619 BNX2X_ERR("NVRAM config error. "
7620 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7621 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7622 return;
7623 }
7624
34f80b04
EG
7625 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7626 port*0x18);
7627 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7628
a2fbb9ea
ET
7629 break;
7630
7631 default:
7632 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7633 bp->port.link_config);
a2fbb9ea
ET
7634 return;
7635 }
34f80b04 7636 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7637
7638 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7639 if (!(bp->link_params.speed_cap_mask &
7640 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7641 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7642
c18487ee
YR
7643 if (!(bp->link_params.speed_cap_mask &
7644 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7645 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7646
c18487ee
YR
7647 if (!(bp->link_params.speed_cap_mask &
7648 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7649 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7650
c18487ee
YR
7651 if (!(bp->link_params.speed_cap_mask &
7652 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7653 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7654
c18487ee
YR
7655 if (!(bp->link_params.speed_cap_mask &
7656 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7657 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7658 SUPPORTED_1000baseT_Full);
a2fbb9ea 7659
c18487ee
YR
7660 if (!(bp->link_params.speed_cap_mask &
7661 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7662 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7663
c18487ee
YR
7664 if (!(bp->link_params.speed_cap_mask &
7665 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7666 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7667
34f80b04 7668 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7669}
7670
34f80b04 7671static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7672{
c18487ee 7673 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7674
34f80b04 7675 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7676 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7677 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7678 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7679 bp->port.advertising = bp->port.supported;
a2fbb9ea 7680 } else {
c18487ee
YR
7681 u32 ext_phy_type =
7682 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7683
7684 if ((ext_phy_type ==
7685 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7686 (ext_phy_type ==
7687 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7688 /* force 10G, no AN */
c18487ee 7689 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7690 bp->port.advertising =
a2fbb9ea
ET
7691 (ADVERTISED_10000baseT_Full |
7692 ADVERTISED_FIBRE);
7693 break;
7694 }
7695 BNX2X_ERR("NVRAM config error. "
7696 "Invalid link_config 0x%x"
7697 " Autoneg not supported\n",
34f80b04 7698 bp->port.link_config);
a2fbb9ea
ET
7699 return;
7700 }
7701 break;
7702
7703 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7704 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7705 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7706 bp->port.advertising = (ADVERTISED_10baseT_Full |
7707 ADVERTISED_TP);
a2fbb9ea
ET
7708 } else {
7709 BNX2X_ERR("NVRAM config error. "
7710 "Invalid link_config 0x%x"
7711 " speed_cap_mask 0x%x\n",
34f80b04 7712 bp->port.link_config,
c18487ee 7713 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7714 return;
7715 }
7716 break;
7717
7718 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7719 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7720 bp->link_params.req_line_speed = SPEED_10;
7721 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7722 bp->port.advertising = (ADVERTISED_10baseT_Half |
7723 ADVERTISED_TP);
a2fbb9ea
ET
7724 } else {
7725 BNX2X_ERR("NVRAM config error. "
7726 "Invalid link_config 0x%x"
7727 " speed_cap_mask 0x%x\n",
34f80b04 7728 bp->port.link_config,
c18487ee 7729 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7730 return;
7731 }
7732 break;
7733
7734 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7735 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7736 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7737 bp->port.advertising = (ADVERTISED_100baseT_Full |
7738 ADVERTISED_TP);
a2fbb9ea
ET
7739 } else {
7740 BNX2X_ERR("NVRAM config error. "
7741 "Invalid link_config 0x%x"
7742 " speed_cap_mask 0x%x\n",
34f80b04 7743 bp->port.link_config,
c18487ee 7744 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7745 return;
7746 }
7747 break;
7748
7749 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7750 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7751 bp->link_params.req_line_speed = SPEED_100;
7752 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7753 bp->port.advertising = (ADVERTISED_100baseT_Half |
7754 ADVERTISED_TP);
a2fbb9ea
ET
7755 } else {
7756 BNX2X_ERR("NVRAM config error. "
7757 "Invalid link_config 0x%x"
7758 " speed_cap_mask 0x%x\n",
34f80b04 7759 bp->port.link_config,
c18487ee 7760 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7761 return;
7762 }
7763 break;
7764
7765 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7766 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7767 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7768 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7769 ADVERTISED_TP);
a2fbb9ea
ET
7770 } else {
7771 BNX2X_ERR("NVRAM config error. "
7772 "Invalid link_config 0x%x"
7773 " speed_cap_mask 0x%x\n",
34f80b04 7774 bp->port.link_config,
c18487ee 7775 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7776 return;
7777 }
7778 break;
7779
7780 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7781 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7782 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7783 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7784 ADVERTISED_TP);
a2fbb9ea
ET
7785 } else {
7786 BNX2X_ERR("NVRAM config error. "
7787 "Invalid link_config 0x%x"
7788 " speed_cap_mask 0x%x\n",
34f80b04 7789 bp->port.link_config,
c18487ee 7790 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7791 return;
7792 }
7793 break;
7794
7795 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7796 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7797 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7798 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7799 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7800 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7801 ADVERTISED_FIBRE);
a2fbb9ea
ET
7802 } else {
7803 BNX2X_ERR("NVRAM config error. "
7804 "Invalid link_config 0x%x"
7805 " speed_cap_mask 0x%x\n",
34f80b04 7806 bp->port.link_config,
c18487ee 7807 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7808 return;
7809 }
7810 break;
7811
7812 default:
7813 BNX2X_ERR("NVRAM config error. "
7814 "BAD link speed link_config 0x%x\n",
34f80b04 7815 bp->port.link_config);
c18487ee 7816 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7817 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7818 break;
7819 }
a2fbb9ea 7820
34f80b04
EG
7821 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7822 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7823 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7824 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7825 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7826
c18487ee 7827 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7828 " advertising 0x%x\n",
c18487ee
YR
7829 bp->link_params.req_line_speed,
7830 bp->link_params.req_duplex,
34f80b04 7831 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7832}
7833
34f80b04 7834static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7835{
34f80b04
EG
7836 int port = BP_PORT(bp);
7837 u32 val, val2;
a2fbb9ea 7838
c18487ee 7839 bp->link_params.bp = bp;
34f80b04 7840 bp->link_params.port = port;
c18487ee 7841
c18487ee 7842 bp->link_params.serdes_config =
f1410647 7843 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7844 bp->link_params.lane_config =
a2fbb9ea 7845 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7846 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7847 SHMEM_RD(bp,
7848 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7849 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7850 SHMEM_RD(bp,
7851 dev_info.port_hw_config[port].speed_capability_mask);
7852
34f80b04 7853 bp->port.link_config =
a2fbb9ea
ET
7854 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7855
34f80b04
EG
7856 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7857 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7858 " link_config 0x%08x\n",
c18487ee
YR
7859 bp->link_params.serdes_config,
7860 bp->link_params.lane_config,
7861 bp->link_params.ext_phy_config,
34f80b04 7862 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7863
34f80b04 7864 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7865 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7866 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7867
7868 bnx2x_link_settings_requested(bp);
7869
7870 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7871 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7872 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7873 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7874 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7875 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7876 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7877 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7878 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7879 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7880}
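/* Worked example with illustrative shmem values: mac_upper = 0x00001a2b
 * and mac_lower = 0x3c4d5e6f unpack to the station address
 * 1a:2b:3c:4d:5e:6f.
 */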
7881
7882static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7883{
7884 int func = BP_FUNC(bp);
7885 u32 val, val2;
7886 int rc = 0;
a2fbb9ea 7887
34f80b04 7888 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7889
34f80b04
EG
7890 bp->e1hov = 0;
7891 bp->e1hmf = 0;
7892 if (CHIP_IS_E1H(bp)) {
7893 bp->mf_config =
7894 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7895
3196a88a
EG
7896 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7897 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7898 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7899
34f80b04
EG
7900 bp->e1hov = val;
7901 bp->e1hmf = 1;
7902 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7903 "(0x%04x)\n",
7904 func, bp->e1hov, bp->e1hov);
7905 } else {
7906 BNX2X_DEV_INFO("Single function mode\n");
7907 if (BP_E1HVN(bp)) {
7908 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7909 " aborting\n", func);
7910 rc = -EPERM;
7911 }
7912 }
7913 }
a2fbb9ea 7914
34f80b04
EG
7915 if (!BP_NOMCP(bp)) {
7916 bnx2x_get_port_hwinfo(bp);
7917
7918 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7919 DRV_MSG_SEQ_NUMBER_MASK);
7920 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7921 }
7922
7923 if (IS_E1HMF(bp)) {
7924 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7925 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7926 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7927 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7928 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7929 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7930 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7931 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7932 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7933 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7934 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7935 ETH_ALEN);
7936 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7937 ETH_ALEN);
a2fbb9ea 7938 }
34f80b04
EG
7939
7940 return rc;
a2fbb9ea
ET
7941 }
7942
34f80b04
EG
7943 if (BP_NOMCP(bp)) {
7944 /* only supposed to happen on emulation/FPGA */
33471629 7945 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7946 random_ether_addr(bp->dev->dev_addr);
7947 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7948 }
a2fbb9ea 7949
34f80b04
EG
7950 return rc;
7951}
7952
7953static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7954{
7955 int func = BP_FUNC(bp);
7956 int rc;
7957
da5a662a
VZ
7958 /* Disable interrupt handling until HW is initialized */
7959 atomic_set(&bp->intr_sem, 1);
7960
34f80b04 7961 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7962
1cf167f2 7963 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7964 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7965
7966 rc = bnx2x_get_hwinfo(bp);
7967
7968 /* need to reset chip if undi was active */
7969 if (!BP_NOMCP(bp))
7970 bnx2x_undi_unload(bp);
7971
7972 if (CHIP_REV_IS_FPGA(bp))
7973 printk(KERN_ERR PFX "FPGA detected\n");
7974
7975 if (BP_NOMCP(bp) && (func == 0))
7976 printk(KERN_ERR PFX
7977 "MCP disabled, must load devices in order!\n");
7978
555f6c78 7979 /* Set multi queue mode */
8badd27a
EG
7980 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7981 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 7982 printk(KERN_ERR PFX
8badd27a 7983 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
7984 multi_mode = ETH_RSS_MODE_DISABLED;
7985 }
7986 bp->multi_mode = multi_mode;
7987
7988
7a9b2557
VZ
7989 /* Set TPA flags */
7990 if (disable_tpa) {
7991 bp->flags &= ~TPA_ENABLE_FLAG;
7992 bp->dev->features &= ~NETIF_F_LRO;
7993 } else {
7994 bp->flags |= TPA_ENABLE_FLAG;
7995 bp->dev->features |= NETIF_F_LRO;
7996 }
7997
7998
34f80b04
EG
7999 bp->tx_ring_size = MAX_TX_AVAIL;
8000 bp->rx_ring_size = MAX_RX_AVAIL;
8001
8002 bp->rx_csum = 1;
8003 bp->rx_offset = 0;
8004
8005 bp->tx_ticks = 50;
8006 bp->rx_ticks = 25;
8007
34f80b04
EG
8008 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8009 bp->current_interval = (poll ? poll : bp->timer_interval);
8010
8011 init_timer(&bp->timer);
8012 bp->timer.expires = jiffies + bp->current_interval;
8013 bp->timer.data = (unsigned long) bp;
8014 bp->timer.function = bnx2x_timer;
8015
8016 return rc;
a2fbb9ea
ET
8017}
8018
8019/*
8020 * ethtool service functions
8021 */
8022
8023/* All ethtool functions called with rtnl_lock */
8024
8025static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8026{
8027 struct bnx2x *bp = netdev_priv(dev);
8028
34f80b04
EG
8029 cmd->supported = bp->port.supported;
8030 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8031
8032 if (netif_carrier_ok(dev)) {
c18487ee
YR
8033 cmd->speed = bp->link_vars.line_speed;
8034 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8035 } else {
c18487ee
YR
8036 cmd->speed = bp->link_params.req_line_speed;
8037 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8038 }
34f80b04
EG
8039 if (IS_E1HMF(bp)) {
8040 u16 vn_max_rate;
8041
8042 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8043 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8044 if (vn_max_rate < cmd->speed)
8045 cmd->speed = vn_max_rate;
8046 }
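/* Illustrative example: a FUNC_MF_CFG_MAX_BW field of 25 yields
 * vn_max_rate = 2500, capping the reported speed at 2.5 Gbps */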
a2fbb9ea 8047
c18487ee
YR
8048 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8049 u32 ext_phy_type =
8050 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8051
8052 switch (ext_phy_type) {
8053 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8054 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8055 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8056 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8057 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
8058 cmd->port = PORT_FIBRE;
8059 break;
8060
8061 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8062 cmd->port = PORT_TP;
8063 break;
8064
c18487ee
YR
8065 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8066 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8067 bp->link_params.ext_phy_config);
8068 break;
8069
f1410647
ET
8070 default:
8071 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8072 bp->link_params.ext_phy_config);
8073 break;
f1410647
ET
8074 }
8075 } else
a2fbb9ea 8076 cmd->port = PORT_TP;
a2fbb9ea 8077
34f80b04 8078 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8079 cmd->transceiver = XCVR_INTERNAL;
8080
c18487ee 8081 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8082 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8083 else
a2fbb9ea 8084 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8085
8086 cmd->maxtxpkt = 0;
8087 cmd->maxrxpkt = 0;
8088
8089 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8090 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8091 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8092 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8093 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8094 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8095 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8096
8097 return 0;
8098}
8099
8100static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8101{
8102 struct bnx2x *bp = netdev_priv(dev);
8103 u32 advertising;
8104
34f80b04
EG
8105 if (IS_E1HMF(bp))
8106 return 0;
8107
a2fbb9ea
ET
8108 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8109 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8110 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8111 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8112 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8113 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8114 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8115
a2fbb9ea 8116 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8117 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8118 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8119 return -EINVAL;
f1410647 8120 }
a2fbb9ea
ET
8121
8122 /* advertise the requested speed and duplex if supported */
34f80b04 8123 cmd->advertising &= bp->port.supported;
a2fbb9ea 8124
c18487ee
YR
8125 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8126 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8127 bp->port.advertising |= (ADVERTISED_Autoneg |
8128 cmd->advertising);
a2fbb9ea
ET
8129
8130 } else { /* forced speed */
8131 /* advertise the requested speed and duplex if supported */
8132 switch (cmd->speed) {
8133 case SPEED_10:
8134 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8135 if (!(bp->port.supported &
f1410647
ET
8136 SUPPORTED_10baseT_Full)) {
8137 DP(NETIF_MSG_LINK,
8138 "10M full not supported\n");
a2fbb9ea 8139 return -EINVAL;
f1410647 8140 }
a2fbb9ea
ET
8141
8142 advertising = (ADVERTISED_10baseT_Full |
8143 ADVERTISED_TP);
8144 } else {
34f80b04 8145 if (!(bp->port.supported &
f1410647
ET
8146 SUPPORTED_10baseT_Half)) {
8147 DP(NETIF_MSG_LINK,
8148 "10M half not supported\n");
a2fbb9ea 8149 return -EINVAL;
f1410647 8150 }
a2fbb9ea
ET
8151
8152 advertising = (ADVERTISED_10baseT_Half |
8153 ADVERTISED_TP);
8154 }
8155 break;
8156
8157 case SPEED_100:
8158 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8159 if (!(bp->port.supported &
f1410647
ET
8160 SUPPORTED_100baseT_Full)) {
8161 DP(NETIF_MSG_LINK,
8162 "100M full not supported\n");
a2fbb9ea 8163 return -EINVAL;
f1410647 8164 }
a2fbb9ea
ET
8165
8166 advertising = (ADVERTISED_100baseT_Full |
8167 ADVERTISED_TP);
8168 } else {
34f80b04 8169 if (!(bp->port.supported &
f1410647
ET
8170 SUPPORTED_100baseT_Half)) {
8171 DP(NETIF_MSG_LINK,
8172 "100M half not supported\n");
a2fbb9ea 8173 return -EINVAL;
f1410647 8174 }
a2fbb9ea
ET
8175
8176 advertising = (ADVERTISED_100baseT_Half |
8177 ADVERTISED_TP);
8178 }
8179 break;
8180
8181 case SPEED_1000:
f1410647
ET
8182 if (cmd->duplex != DUPLEX_FULL) {
8183 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8184 return -EINVAL;
f1410647 8185 }
a2fbb9ea 8186
34f80b04 8187 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8188 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8189 return -EINVAL;
f1410647 8190 }
a2fbb9ea
ET
8191
8192 advertising = (ADVERTISED_1000baseT_Full |
8193 ADVERTISED_TP);
8194 break;
8195
8196 case SPEED_2500:
f1410647
ET
8197 if (cmd->duplex != DUPLEX_FULL) {
8198 DP(NETIF_MSG_LINK,
8199 "2.5G half not supported\n");
a2fbb9ea 8200 return -EINVAL;
f1410647 8201 }
a2fbb9ea 8202
34f80b04 8203 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8204 DP(NETIF_MSG_LINK,
8205 "2.5G full not supported\n");
a2fbb9ea 8206 return -EINVAL;
f1410647 8207 }
a2fbb9ea 8208
f1410647 8209 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8210 ADVERTISED_TP);
8211 break;
8212
8213 case SPEED_10000:
f1410647
ET
8214 if (cmd->duplex != DUPLEX_FULL) {
8215 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8216 return -EINVAL;
f1410647 8217 }
a2fbb9ea 8218
34f80b04 8219 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8220 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8221 return -EINVAL;
f1410647 8222 }
a2fbb9ea
ET
8223
8224 advertising = (ADVERTISED_10000baseT_Full |
8225 ADVERTISED_FIBRE);
8226 break;
8227
8228 default:
f1410647 8229 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8230 return -EINVAL;
8231 }
8232
c18487ee
YR
8233 bp->link_params.req_line_speed = cmd->speed;
8234 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8235 bp->port.advertising = advertising;
a2fbb9ea
ET
8236 }
8237
c18487ee 8238 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8239 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8240 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8241 bp->port.advertising);
a2fbb9ea 8242
34f80b04 8243 if (netif_running(dev)) {
bb2a0f7a 8244 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8245 bnx2x_link_set(bp);
8246 }
a2fbb9ea
ET
8247
8248 return 0;
8249}
8250
c18487ee
YR
8251#define PHY_FW_VER_LEN 10
8252
a2fbb9ea
ET
8253static void bnx2x_get_drvinfo(struct net_device *dev,
8254 struct ethtool_drvinfo *info)
8255{
8256 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8257 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8258
8259 strcpy(info->driver, DRV_MODULE_NAME);
8260 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8261
8262 phy_fw_ver[0] = '\0';
34f80b04 8263 if (bp->port.pmf) {
4a37fb66 8264 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8265 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8266 (bp->state != BNX2X_STATE_CLOSED),
8267 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8268 bnx2x_release_phy_lock(bp);
34f80b04 8269 }
c18487ee 8270
f0e53a84
EG
8271 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8272 (bp->common.bc_ver & 0xff0000) >> 16,
8273 (bp->common.bc_ver & 0xff00) >> 8,
8274 (bp->common.bc_ver & 0xff),
8275 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8276 strcpy(info->bus_info, pci_name(bp->pdev));
8277 info->n_stats = BNX2X_NUM_STATS;
8278 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8279 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8280 info->regdump_len = 0;
8281}
8282
8283static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8284{
8285 struct bnx2x *bp = netdev_priv(dev);
8286
8287 if (bp->flags & NO_WOL_FLAG) {
8288 wol->supported = 0;
8289 wol->wolopts = 0;
8290 } else {
8291 wol->supported = WAKE_MAGIC;
8292 if (bp->wol)
8293 wol->wolopts = WAKE_MAGIC;
8294 else
8295 wol->wolopts = 0;
8296 }
8297 memset(&wol->sopass, 0, sizeof(wol->sopass));
8298}
8299
8300static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8301{
8302 struct bnx2x *bp = netdev_priv(dev);
8303
8304 if (wol->wolopts & ~WAKE_MAGIC)
8305 return -EINVAL;
8306
8307 if (wol->wolopts & WAKE_MAGIC) {
8308 if (bp->flags & NO_WOL_FLAG)
8309 return -EINVAL;
8310
8311 bp->wol = 1;
34f80b04 8312 } else
a2fbb9ea 8313 bp->wol = 0;
34f80b04 8314
a2fbb9ea
ET
8315 return 0;
8316}
8317
8318static u32 bnx2x_get_msglevel(struct net_device *dev)
8319{
8320 struct bnx2x *bp = netdev_priv(dev);
8321
8322 return bp->msglevel;
8323}
8324
8325static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8326{
8327 struct bnx2x *bp = netdev_priv(dev);
8328
8329 if (capable(CAP_NET_ADMIN))
8330 bp->msglevel = level;
8331}
8332
8333static int bnx2x_nway_reset(struct net_device *dev)
8334{
8335 struct bnx2x *bp = netdev_priv(dev);
8336
34f80b04
EG
8337 if (!bp->port.pmf)
8338 return 0;
a2fbb9ea 8339
34f80b04 8340 if (netif_running(dev)) {
bb2a0f7a 8341 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8342 bnx2x_link_set(bp);
8343 }
a2fbb9ea
ET
8344
8345 return 0;
8346}
8347
8348static int bnx2x_get_eeprom_len(struct net_device *dev)
8349{
8350 struct bnx2x *bp = netdev_priv(dev);
8351
34f80b04 8352 return bp->common.flash_size;
a2fbb9ea
ET
8353}
8354
8355static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8356{
34f80b04 8357 int port = BP_PORT(bp);
a2fbb9ea
ET
8358 int count, i;
8359 u32 val = 0;
8360
8361 /* adjust timeout for emulation/FPGA */
8362 count = NVRAM_TIMEOUT_COUNT;
8363 if (CHIP_REV_IS_SLOW(bp))
8364 count *= 100;
8365
8366 /* request access to nvram interface */
8367 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8368 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8369
8370 for (i = 0; i < count*10; i++) {
8371 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8372 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8373 break;
8374
8375 udelay(5);
8376 }
8377
8378 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8379 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8380 return -EBUSY;
8381 }
8382
8383 return 0;
8384}
8385
8386static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8387{
34f80b04 8388 int port = BP_PORT(bp);
a2fbb9ea
ET
8389 int count, i;
8390 u32 val = 0;
8391
8392 /* adjust timeout for emulation/FPGA */
8393 count = NVRAM_TIMEOUT_COUNT;
8394 if (CHIP_REV_IS_SLOW(bp))
8395 count *= 100;
8396
8397 /* relinquish nvram interface */
8398 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8399 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8400
8401 for (i = 0; i < count*10; i++) {
8402 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8403 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8404 break;
8405
8406 udelay(5);
8407 }
8408
8409 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8410 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8411 return -EBUSY;
8412 }
8413
8414 return 0;
8415}

static void bnx2x_enable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* enable both bits, even on read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val | MCPR_NVM_ACCESS_ENABLE_EN |
		      MCPR_NVM_ACCESS_ENABLE_WR_EN));
}

static void bnx2x_disable_nvram_access(struct bnx2x *bp)
{
	u32 val;

	val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);

	/* disable both bits, even after read */
	REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
	       (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
			MCPR_NVM_ACCESS_ENABLE_WR_EN)));
}

static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}
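
/* Example of the endianness handling above: if the dword at NVRAM offset 0
 * holds the magic 0x669955aa, the register read returns it in CPU order and
 * cpu_to_be32() stores it so the caller's byte buffer reads 66 99 55 aa on
 * any host; bnx2x_test_nvram() below undoes this with be32_to_cpu() before
 * comparing against the magic. */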

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
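
/* A read burst is framed for the flash controller with the FIRST/LAST
 * command flags: the first dword of the burst carries MCPR_NVM_COMMAND_FIRST,
 * middle dwords carry no flag and the final dword carries
 * MCPR_NVM_COMMAND_LAST (a single-dword read carries both).  E.g. a 12-byte
 * read at 0x100 issues dwords at 0x100 (FIRST), 0x104 (none), 0x108 (LAST).
 */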

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
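
/* Single-byte writes are done as a dword read-modify-write.  For example,
 * writing the byte at offset 0x105: align_offset = 0x104 and
 * BYTE_OFFSET(0x105) = 8, so bits 8..15 of the dword read back from 0x104
 * are replaced with the new byte and the whole dword is rewritten - the
 * three neighbouring bytes are preserved. */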

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
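
/* Unlike reads, write bursts must also respect flash page boundaries: the
 * dword that ends a NVRAM_PAGE_SIZE page gets MCPR_NVM_COMMAND_LAST and the
 * dword that opens the next page gets MCPR_NVM_COMMAND_FIRST again, so one
 * large buffer is programmed as a sequence of page-sized bursts. */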

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
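
/* Resolution of the user's pause request: req_flow_ctrl starts at AUTO, the
 * RX/TX bits are OR-ed in, and a request with neither bit set collapses to
 * NONE.  AUTO survives only when autoneg is requested, the PHY supports it
 * and the line speed itself is auto-negotiated; the negotiated result is
 * what bnx2x_get_pauseparam() later reads back from link_vars.flow_ctrl. */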

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA, when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}
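
/* Each reg_tbl entry is exercised at offset0 + port*offset1, so one table
 * serves both ports; mask lists the implemented (writable) bits.  The test
 * writes all-zeros and then all-ones, reads the value back, restores the
 * saved register, and fails if any masked bit did not latch the pattern. */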

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		u16 cnt = 1000;
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		/* wait until link state is restored */
		if (link_up)
			while (cnt-- && bnx2x_test_link(&bp->link_params,
							&bp->link_vars))
				msleep(10);
	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}
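
/* The loopback test injects exactly one self-addressed 1514-byte frame with
 * a known payload pattern (i & 0xff) on queue 0, rings the doorbell, and
 * then checks three things: the Tx consumer advanced by one, the Rx consumer
 * advanced by one, and the received bytes match the pattern.  Anything else
 * leaves rc at -ENODEV.  (ETH_RX_ERROR_FALGS is the firmware header's own
 * spelling of that flag.) */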

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

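/* 0xdebb20e3 is the standard CRC-32 check constant: running the CRC over a
 * block that already ends with its own stored CRC yields this fixed residual
 * regardless of the data, so each NVRAM section below can be validated
 * without knowing where its CRC field actually sits. */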
static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
				4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

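/* Each entry above is tagged PORT, FUNC or BOTH: port statistics come from
 * the MAC and describe the whole physical port, while function statistics
 * are kept per PCI function.  In E1H multi-function mode the port counters
 * are hidden from ethtool (IS_E1HMF_MODE_STAT) unless BNX2X_MSG_STATS
 * debugging is enabled, since they would mix traffic of several functions. */
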
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}
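
/* The statistics blocks keep 64-bit counters as two consecutive u32 words
 * with the _hi word first (table size 8); HILO_U64(*offset, *(offset + 1))
 * reassembles them into one u64.  Size-4 entries are plain 32-bit counters
 * and size-0 entries are placeholders that are reported as zero. */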

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}

/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
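
/* Note that the split reuses the original DMA mapping: the header BD keeps
 * the start address with nbytes trimmed to hlen, while the new data BD
 * points hlen bytes into the same mapping and carries the remaining
 * (old_len - hlen) bytes.  No second pci_map_single() is needed, and
 * unmapping the first BD at completion time still covers both BDs. */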

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
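
/* 'fix' is the signed distance between where the stack started its partial
 * checksum and the transport header the hardware expects it to start from:
 * a positive fix subtracts the bytes before t_header that were wrongly
 * included, a negative fix adds back the bytes that were skipped.  One's-
 * complement arithmetic makes this a plain add/subtract, and the result is
 * byte-swapped into the order the parsing BD expects. */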

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
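
/* The firmware can fetch at most MAX_FETCH_BD BDs per packet, three of which
 * are reserved for the headers BD, the parsing BD and the last BD.  The
 * sliding-window check above therefore requires every run of
 * (MAX_FETCH_BD - 3) consecutive fragments to cover at least one MSS of
 * payload; if any window falls short, bnx2x_start_xmit() below linearizes
 * the skb with skb_linearize(). */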

/* called with netif_tx_lock
 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue()
 */
static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	struct bnx2x_fastpath *fp;
	struct netdev_queue *txq;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	struct eth_tx_parse_bd *pbd = NULL;
	u16 pkt_prod, bd_prod;
	int nbd, fp_index;
	dma_addr_t mapping;
	u32 xmit_type = bnx2x_xmit_type(bp, skb);
	int vlan_off = (bp->e1hov ? 4 : 0);
	int i;
	u8 hlen = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return NETDEV_TX_BUSY;
#endif

	fp_index = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, fp_index);

	fp = &bp->fp[fp_index];

	if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
		fp->eth_q_stats.driver_xoff++;
		netif_tx_stop_queue(txq);
		BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	}

	DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x  protocol %x  protocol(%x,%x)"
	   "  gso type %x  xmit_type %x\n",
	   skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
	   ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
	/* First, check if we need to linearize the skb
	   (due to FW restrictions) */
	if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
		/* Statistics of linearization */
		bp->lin_cnt++;
		if (skb_linearize(skb) != 0) {
			DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
			   "silently dropping this SKB\n");
			dev_kfree_skb_any(skb);
			return NETDEV_TX_OK;
		}
	}
#endif
755735eb 10100
a2fbb9ea 10101 /*
755735eb 10102 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10103 then for TSO or xsum we have a parsing info BD,
755735eb 10104 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10105 (don't forget to mark the last one as last,
10106 and to unmap only AFTER you write to the BD ...)
755735eb 10107 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10108 */
10109
10110 pkt_prod = fp->tx_pkt_prod++;
755735eb 10111 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10112
755735eb 10113 /* get a tx_buf and first BD */
a2fbb9ea
ET
10114 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10115 tx_bd = &fp->tx_desc_ring[bd_prod];
10116
10117 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10118 tx_bd->general_data = (UNICAST_ADDRESS <<
10119 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10120 /* header nbd */
10121 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10122
755735eb
EG
10123 /* remember the first BD of the packet */
10124 tx_buf->first_bd = fp->tx_bd_prod;
10125 tx_buf->skb = skb;
a2fbb9ea
ET
10126
10127 DP(NETIF_MSG_TX_QUEUED,
10128 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10129 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10130
0c6671b0
EG
10131#ifdef BCM_VLAN
10132 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10133 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10134 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10135 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10136 vlan_off += 4;
10137 } else
0c6671b0 10138#endif
755735eb 10139 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10140
755735eb 10141 if (xmit_type) {
755735eb 10142 /* turn on parsing and get a BD */
a2fbb9ea
ET
10143 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10144 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10145
10146 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10147 }
10148
10149 if (xmit_type & XMIT_CSUM) {
10150 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10151
10152 /* for now NS flag is not used in Linux */
755735eb 10153 pbd->global_data = (hlen |
96fc1784 10154 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 10155 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10156
755735eb
EG
10157 pbd->ip_hlen = (skb_transport_header(skb) -
10158 skb_network_header(skb)) / 2;
10159
10160 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10161
755735eb
EG
10162 pbd->total_hlen = cpu_to_le16(hlen);
10163 hlen = hlen*2 - vlan_off;
a2fbb9ea 10164
755735eb
EG
10165 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10166
10167 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10168 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10169 ETH_TX_BD_FLAGS_IP_CSUM;
10170 else
10171 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10172
10173 if (xmit_type & XMIT_CSUM_TCP) {
10174 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10175
10176 } else {
10177 s8 fix = SKB_CS_OFF(skb); /* signed! */
10178
a2fbb9ea 10179 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10180 pbd->cs_offset = fix / 2;
a2fbb9ea 10181
755735eb
EG
10182 DP(NETIF_MSG_TX_QUEUED,
10183 "hlen %d offset %d fix %d csum before fix %x\n",
10184 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10185 SKB_CS(skb));
10186
10187 /* HW bug: fixup the CSUM */
10188 pbd->tcp_pseudo_csum =
10189 bnx2x_csum_fix(skb_transport_header(skb),
10190 SKB_CS(skb), fix);
10191
10192 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10193 pbd->tcp_pseudo_csum);
10194 }
a2fbb9ea
ET
10195 }
10196
10197 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10198 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10199
10200 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10201 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
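	/* BD count: one start BD, plus a parsing BD when present,
	 * plus one BD per page fragment
	 */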
6378c025 10202 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10203 tx_bd->nbd = cpu_to_le16(nbd);
10204 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10205
10206 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10207 " nbytes %d flags %x vlan %x\n",
10208 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10209 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10210 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10211
755735eb 10212 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10213
10214 DP(NETIF_MSG_TX_QUEUED,
10215 "TSO packet len %d hlen %d total len %d tso size %d\n",
10216 skb->len, hlen, skb_headlen(skb),
10217 skb_shinfo(skb)->gso_size);
10218
10219 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10220
755735eb
EG
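		/* if the linear data extends beyond the headers, split
		 * the first BD so the headers get a BD of their own
		 * (presumably required by FW for LSO)
		 */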
10221 if (unlikely(skb_headlen(skb) > hlen))
10222 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10223 bd_prod, ++nbd);
a2fbb9ea
ET
10224
10225 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10226 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10227 pbd->tcp_flags = pbd_tcp_flags(skb);
10228
10229 if (xmit_type & XMIT_GSO_V4) {
10230 pbd->ip_id = swab16(ip_hdr(skb)->id);
10231 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10232 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10233 ip_hdr(skb)->daddr,
10234 0, IPPROTO_TCP, 0));
755735eb
EG
10235
10236 } else
10237 pbd->tcp_pseudo_csum =
10238 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10239 &ipv6_hdr(skb)->daddr,
10240 0, IPPROTO_TCP, 0));
10241
a2fbb9ea
ET
10242 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10243 }
10244
755735eb
EG
10245 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10246 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10247
755735eb
EG
10248 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10249 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10250
755735eb
EG
10251 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10252 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10253
755735eb
EG
10254 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10255 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10256 tx_bd->nbytes = cpu_to_le16(frag->size);
10257 tx_bd->vlan = cpu_to_le16(pkt_prod);
10258 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10259
755735eb
EG
10260 DP(NETIF_MSG_TX_QUEUED,
10261 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10262 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10263 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10264 }
10265
755735eb 10266 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10267 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10268
10269 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10270 tx_bd, tx_bd->bd_flags.as_bitfield);
10271
a2fbb9ea
ET
10272 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10273
755735eb 10274 /* now send a tx doorbell, counting the next BD
a2fbb9ea
ET
10275 * if the packet contains or ends with it
10276 */
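	/* if the producer's offset within the BD page is smaller than
	 * nbd, the chain wrapped across a page boundary and consumed
	 * the page's last BD (presumably a next-page pointer), which
	 * must be counted too
	 */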
10277 if (TX_BD_POFF(bd_prod) < nbd)
10278 nbd++;
10279
10280 if (pbd)
10281 DP(NETIF_MSG_TX_QUEUED,
10282 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10283 " tcp_flags %x xsum %x seq %u hlen %u\n",
10284 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10285 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10286 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10287
755735eb 10288 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10289
58f4c4cf
EG
10290 /*
10291 * Make sure that the BD data is updated before updating the producer
10292 * since FW might read the BD right after the producer is updated.
10293 * This is only applicable for weak-ordered memory model archs such
10294 * as IA-64. The following barrier is also mandatory since FW
10295 * assumes packets always have BDs.
10296 */
10297 wmb();
10298
96fc1784
ET
10299 fp->hw_tx_prods->bds_prod =
10300 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 10301 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
10302 fp->hw_tx_prods->packets_prod =
10303 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 10304 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
10305
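	/* make sure the doorbell write has reached the chipset before
	 * another CPU (taking the Tx lock next) can issue its own
	 * doorbell; a no-op on strongly-ordered platforms
	 */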
10306 mmiowb();
10307
755735eb 10308 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10309 dev->trans_start = jiffies;
10310
10311 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10312 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10313 if we put Tx into XOFF state. */
10314 smp_mb();
555f6c78 10315 netif_tx_stop_queue(txq);
de832a55 10316 fp->eth_q_stats.driver_xoff++;
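		/* re-check: a concurrent Tx completion may have freed
		 * BDs between the availability test and the queue stop
		 */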
a2fbb9ea 10317 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10318 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10319 }
10320 fp->tx_pkt++;
10321
10322 return NETDEV_TX_OK;
10323}
10324
bb2a0f7a 10325/* called with rtnl_lock */
a2fbb9ea
ET
10326static int bnx2x_open(struct net_device *dev)
10327{
10328 struct bnx2x *bp = netdev_priv(dev);
10329
6eccabb3
EG
10330 netif_carrier_off(dev);
10331
a2fbb9ea
ET
10332 bnx2x_set_power_state(bp, PCI_D0);
10333
bb2a0f7a 10334 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10335}
10336
bb2a0f7a 10337/* called with rtnl_lock */
a2fbb9ea
ET
10338static int bnx2x_close(struct net_device *dev)
10339{
a2fbb9ea
ET
10340 struct bnx2x *bp = netdev_priv(dev);
10341
10342 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10343 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10344 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10345 if (!CHIP_REV_IS_SLOW(bp))
10346 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10347
10348 return 0;
10349}
10350
34f80b04
EG
10351/* called with netif_tx_lock from set_multicast */
10352static void bnx2x_set_rx_mode(struct net_device *dev)
10353{
10354 struct bnx2x *bp = netdev_priv(dev);
10355 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10356 int port = BP_PORT(bp);
10357
10358 if (bp->state != BNX2X_STATE_OPEN) {
10359 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10360 return;
10361 }
10362
10363 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10364
10365 if (dev->flags & IFF_PROMISC)
10366 rx_mode = BNX2X_RX_MODE_PROMISC;
10367
10368 else if ((dev->flags & IFF_ALLMULTI) ||
10369 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10370 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10371
10372 else { /* some multicasts */
10373 if (CHIP_IS_E1(bp)) {
10374 int i, old, offset;
10375 struct dev_mc_list *mclist;
10376 struct mac_configuration_cmd *config =
10377 bnx2x_sp(bp, mcast_config);
10378
10379 for (i = 0, mclist = dev->mc_list;
10380 mclist && (i < dev->mc_count);
10381 i++, mclist = mclist->next) {
10382
10383 config->config_table[i].
10384 cam_entry.msb_mac_addr =
10385 swab16(*(u16 *)&mclist->dmi_addr[0]);
10386 config->config_table[i].
10387 cam_entry.middle_mac_addr =
10388 swab16(*(u16 *)&mclist->dmi_addr[2]);
10389 config->config_table[i].
10390 cam_entry.lsb_mac_addr =
10391 swab16(*(u16 *)&mclist->dmi_addr[4]);
10392 config->config_table[i].cam_entry.flags =
10393 cpu_to_le16(port);
10394 config->config_table[i].
10395 target_table_entry.flags = 0;
10396 config->config_table[i].
10397 target_table_entry.client_id = 0;
10398 config->config_table[i].
10399 target_table_entry.vlan_id = 0;
10400
10401 DP(NETIF_MSG_IFUP,
10402 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10403 config->config_table[i].
10404 cam_entry.msb_mac_addr,
10405 config->config_table[i].
10406 cam_entry.middle_mac_addr,
10407 config->config_table[i].
10408 cam_entry.lsb_mac_addr);
10409 }
8d9c5f34 10410 old = config->hdr.length;
34f80b04
EG
10411 if (old > i) {
10412 for (; i < old; i++) {
10413 if (CAM_IS_INVALID(config->
10414 config_table[i])) {
af246401 10415 /* already invalidated */
34f80b04
EG
10416 break;
10417 }
10418 /* invalidate */
10419 CAM_INVALIDATE(config->
10420 config_table[i]);
10421 }
10422 }
10423
10424 if (CHIP_REV_IS_SLOW(bp))
10425 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10426 else
10427 offset = BNX2X_MAX_MULTICAST*(1 + port);
10428
8d9c5f34 10429 config->hdr.length = i;
34f80b04 10430 config->hdr.offset = offset;
8d9c5f34 10431 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10432 config->hdr.reserved1 = 0;
10433
10434 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10435 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10436 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10437 0);
10438 } else { /* E1H */
10439 /* Accept one or more multicasts */
10440 struct dev_mc_list *mclist;
10441 u32 mc_filter[MC_HASH_SIZE];
10442 u32 crc, bit, regidx;
10443 int i;
10444
10445 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10446
10447 for (i = 0, mclist = dev->mc_list;
10448 mclist && (i < dev->mc_count);
10449 i++, mclist = mclist->next) {
10450
7c510e4b
JB
10451 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10452 mclist->dmi_addr);
34f80b04
EG
10453
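				/* 256-bit hash filter: the top byte of the
				 * CRC32c of the MAC selects the register
				 * (upper 3 bits) and the bit within it
				 * (lower 5 bits)
				 */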
10454 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10455 bit = (crc >> 24) & 0xff;
10456 regidx = bit >> 5;
10457 bit &= 0x1f;
10458 mc_filter[regidx] |= (1 << bit);
10459 }
10460
10461 for (i = 0; i < MC_HASH_SIZE; i++)
10462 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10463 mc_filter[i]);
10464 }
10465 }
10466
10467 bp->rx_mode = rx_mode;
10468 bnx2x_set_storm_rx_mode(bp);
10469}
10470
10471/* called with rtnl_lock */
a2fbb9ea
ET
10472static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10473{
10474 struct sockaddr *addr = p;
10475 struct bnx2x *bp = netdev_priv(dev);
10476
34f80b04 10477 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10478 return -EINVAL;
10479
10480 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10481 if (netif_running(dev)) {
10482 if (CHIP_IS_E1(bp))
3101c2bc 10483 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10484 else
3101c2bc 10485 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10486 }
a2fbb9ea
ET
10487
10488 return 0;
10489}
10490
c18487ee 10491/* called with rtnl_lock */
a2fbb9ea
ET
10492static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10493{
10494 struct mii_ioctl_data *data = if_mii(ifr);
10495 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10496 int port = BP_PORT(bp);
a2fbb9ea
ET
10497 int err;
10498
10499 switch (cmd) {
10500 case SIOCGMIIPHY:
34f80b04 10501 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10502
c14423fe 10503 /* fallthrough */
c18487ee 10504
a2fbb9ea 10505 case SIOCGMIIREG: {
c18487ee 10506 u16 mii_regval;
a2fbb9ea 10507
c18487ee
YR
10508 if (!netif_running(dev))
10509 return -EAGAIN;
a2fbb9ea 10510
34f80b04 10511 mutex_lock(&bp->port.phy_mutex);
3196a88a 10512 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10513 DEFAULT_PHY_DEV_ADDR,
10514 (data->reg_num & 0x1f), &mii_regval);
10515 data->val_out = mii_regval;
34f80b04 10516 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10517 return err;
10518 }
10519
10520 case SIOCSMIIREG:
10521 if (!capable(CAP_NET_ADMIN))
10522 return -EPERM;
10523
c18487ee
YR
10524 if (!netif_running(dev))
10525 return -EAGAIN;
10526
34f80b04 10527 mutex_lock(&bp->port.phy_mutex);
3196a88a 10528 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10529 DEFAULT_PHY_DEV_ADDR,
10530 (data->reg_num & 0x1f), data->val_in);
34f80b04 10531 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10532 return err;
10533
10534 default:
10535 /* do nothing */
10536 break;
10537 }
10538
10539 return -EOPNOTSUPP;
10540}
10541
34f80b04 10542/* called with rtnl_lock */
a2fbb9ea
ET
10543static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10544{
10545 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10546 int rc = 0;
a2fbb9ea
ET
10547
10548 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10549 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10550 return -EINVAL;
10551
10552 /* This does not race with packet allocation
c14423fe 10553 * because the actual alloc size is
a2fbb9ea
ET
10554 * only updated as part of load
10555 */
10556 dev->mtu = new_mtu;
10557
10558 if (netif_running(dev)) {
34f80b04
EG
10559 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10560 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10561 }
34f80b04
EG
10562
10563 return rc;
a2fbb9ea
ET
10564}
10565
10566static void bnx2x_tx_timeout(struct net_device *dev)
10567{
10568 struct bnx2x *bp = netdev_priv(dev);
10569
10570#ifdef BNX2X_STOP_ON_ERROR
10571 if (!bp->panic)
10572 bnx2x_panic();
10573#endif
10574 /* This allows the netif to be shut down gracefully before resetting */
10575 schedule_work(&bp->reset_task);
10576}
10577
10578#ifdef BCM_VLAN
34f80b04 10579/* called with rtnl_lock */
a2fbb9ea
ET
10580static void bnx2x_vlan_rx_register(struct net_device *dev,
10581 struct vlan_group *vlgrp)
10582{
10583 struct bnx2x *bp = netdev_priv(dev);
10584
10585 bp->vlgrp = vlgrp;
0c6671b0
EG
10586
10587 /* Set flags according to the required capabilities */
10588 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10589
10590 if (dev->features & NETIF_F_HW_VLAN_TX)
10591 bp->flags |= HW_VLAN_TX_FLAG;
10592
10593 if (dev->features & NETIF_F_HW_VLAN_RX)
10594 bp->flags |= HW_VLAN_RX_FLAG;
10595
a2fbb9ea 10596 if (netif_running(dev))
49d66772 10597 bnx2x_set_client_config(bp);
a2fbb9ea 10598}
34f80b04 10599
a2fbb9ea
ET
10600#endif
10601
10602#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10603static void poll_bnx2x(struct net_device *dev)
10604{
10605 struct bnx2x *bp = netdev_priv(dev);
10606
10607 disable_irq(bp->pdev->irq);
10608 bnx2x_interrupt(bp->pdev->irq, dev);
10609 enable_irq(bp->pdev->irq);
10610}
10611#endif
10612
c64213cd
SH
10613static const struct net_device_ops bnx2x_netdev_ops = {
10614 .ndo_open = bnx2x_open,
10615 .ndo_stop = bnx2x_close,
10616 .ndo_start_xmit = bnx2x_start_xmit,
10617 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10618 .ndo_set_mac_address = bnx2x_change_mac_addr,
10619 .ndo_validate_addr = eth_validate_addr,
10620 .ndo_do_ioctl = bnx2x_ioctl,
10621 .ndo_change_mtu = bnx2x_change_mtu,
10622 .ndo_tx_timeout = bnx2x_tx_timeout,
10623#ifdef BCM_VLAN
10624 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10625#endif
10626#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10627 .ndo_poll_controller = poll_bnx2x,
10628#endif
10629};
10630
10631
34f80b04
EG
10632static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10633 struct net_device *dev)
a2fbb9ea
ET
10634{
10635 struct bnx2x *bp;
10636 int rc;
10637
10638 SET_NETDEV_DEV(dev, &pdev->dev);
10639 bp = netdev_priv(dev);
10640
34f80b04
EG
10641 bp->dev = dev;
10642 bp->pdev = pdev;
a2fbb9ea 10643 bp->flags = 0;
34f80b04 10644 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10645
10646 rc = pci_enable_device(pdev);
10647 if (rc) {
10648 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10649 goto err_out;
10650 }
10651
10652 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10653 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10654 " aborting\n");
10655 rc = -ENODEV;
10656 goto err_out_disable;
10657 }
10658
10659 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10660 printk(KERN_ERR PFX "Cannot find second PCI device"
10661 " base address, aborting\n");
10662 rc = -ENODEV;
10663 goto err_out_disable;
10664 }
10665
34f80b04
EG
10666 if (atomic_read(&pdev->enable_cnt) == 1) {
10667 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10668 if (rc) {
10669 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10670 " aborting\n");
10671 goto err_out_disable;
10672 }
a2fbb9ea 10673
34f80b04
EG
10674 pci_set_master(pdev);
10675 pci_save_state(pdev);
10676 }
a2fbb9ea
ET
10677
10678 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10679 if (bp->pm_cap == 0) {
10680 printk(KERN_ERR PFX "Cannot find power management"
10681 " capability, aborting\n");
10682 rc = -EIO;
10683 goto err_out_release;
10684 }
10685
10686 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10687 if (bp->pcie_cap == 0) {
10688 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10689 " aborting\n");
10690 rc = -EIO;
10691 goto err_out_release;
10692 }
10693
10694 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10695 bp->flags |= USING_DAC_FLAG;
10696 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10697 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10698 " failed, aborting\n");
10699 rc = -EIO;
10700 goto err_out_release;
10701 }
10702
10703 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10704 printk(KERN_ERR PFX "System does not support DMA,"
10705 " aborting\n");
10706 rc = -EIO;
10707 goto err_out_release;
10708 }
10709
34f80b04
EG
10710 dev->mem_start = pci_resource_start(pdev, 0);
10711 dev->base_addr = dev->mem_start;
10712 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10713
10714 dev->irq = pdev->irq;
10715
275f165f 10716 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10717 if (!bp->regview) {
10718 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10719 rc = -ENOMEM;
10720 goto err_out_release;
10721 }
10722
34f80b04
EG
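	/* map at most BNX2X_DB_SIZE of the doorbell BAR; the BAR
	 * itself may expose more space than the driver uses
	 */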
10723 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10724 min_t(u64, BNX2X_DB_SIZE,
10725 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10726 if (!bp->doorbells) {
10727 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10728 rc = -ENOMEM;
10729 goto err_out_unmap;
10730 }
10731
10732 bnx2x_set_power_state(bp, PCI_D0);
10733
34f80b04
EG
10734 /* clean indirect addresses */
10735 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10736 PCICFG_VENDOR_ID_OFFSET);
10737 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10738 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10739 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10740 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
a2fbb9ea 10741
34f80b04 10742 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10743
c64213cd 10744 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10745 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10746 dev->features |= NETIF_F_SG;
10747 dev->features |= NETIF_F_HW_CSUM;
10748 if (bp->flags & USING_DAC_FLAG)
10749 dev->features |= NETIF_F_HIGHDMA;
10750#ifdef BCM_VLAN
10751 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10752 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10753#endif
10754 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10755 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10756
10757 return 0;
10758
10759err_out_unmap:
10760 if (bp->regview) {
10761 iounmap(bp->regview);
10762 bp->regview = NULL;
10763 }
a2fbb9ea
ET
10764 if (bp->doorbells) {
10765 iounmap(bp->doorbells);
10766 bp->doorbells = NULL;
10767 }
10768
10769err_out_release:
34f80b04
EG
10770 if (atomic_read(&pdev->enable_cnt) == 1)
10771 pci_release_regions(pdev);
a2fbb9ea
ET
10772
10773err_out_disable:
10774 pci_disable_device(pdev);
10775 pci_set_drvdata(pdev, NULL);
10776
10777err_out:
10778 return rc;
10779}
10780
25047950
ET
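/* return the negotiated PCI-E link width (number of lanes) */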
10781static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10782{
10783 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10784
10785 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10786 return val;
10787}
10788
10789/* return value of 1=2.5GHz 2=5GHz */
10790static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10791{
10792 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10793
10794 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10795 return val;
10796}
10797
a2fbb9ea
ET
10798static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10799 const struct pci_device_id *ent)
10800{
10801 static int version_printed;
10802 struct net_device *dev = NULL;
10803 struct bnx2x *bp;
25047950 10804 int rc;
a2fbb9ea
ET
10805
10806 if (version_printed++ == 0)
10807 printk(KERN_INFO "%s", version);
10808
10809 /* dev zeroed in init_etherdev */
555f6c78 10810 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
10811 if (!dev) {
10812 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10813 return -ENOMEM;
34f80b04 10814 }
a2fbb9ea 10815
a2fbb9ea
ET
10816 bp = netdev_priv(dev);
10817 bp->msglevel = debug;
10818
34f80b04 10819 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10820 if (rc < 0) {
10821 free_netdev(dev);
10822 return rc;
10823 }
10824
a2fbb9ea
ET
10825 pci_set_drvdata(pdev, dev);
10826
34f80b04 10827 rc = bnx2x_init_bp(bp);
693fc0d1
EG
10828 if (rc)
10829 goto init_one_exit;
10830
10831 rc = register_netdev(dev);
34f80b04 10832 if (rc) {
693fc0d1 10833 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
10834 goto init_one_exit;
10835 }
10836
10837 bp->common.name = board_info[ent->driver_data].name;
25047950 10838 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10839 " IRQ %d, ", dev->name, bp->common.name,
10840 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10841 bnx2x_get_pcie_width(bp),
10842 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10843 dev->base_addr, bp->pdev->irq);
e174961c 10844 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10845 return 0;
34f80b04
EG
10846
10847init_one_exit:
10848 if (bp->regview)
10849 iounmap(bp->regview);
10850
10851 if (bp->doorbells)
10852 iounmap(bp->doorbells);
10853
10854 free_netdev(dev);
10855
10856 if (atomic_read(&pdev->enable_cnt) == 1)
10857 pci_release_regions(pdev);
10858
10859 pci_disable_device(pdev);
10860 pci_set_drvdata(pdev, NULL);
10861
10862 return rc;
a2fbb9ea
ET
10863}
10864
10865static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10866{
10867 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10868 struct bnx2x *bp;
10869
10870 if (!dev) {
228241eb
ET
10871 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10872 return;
10873 }
228241eb 10874 bp = netdev_priv(dev);
a2fbb9ea 10875
a2fbb9ea
ET
10876 unregister_netdev(dev);
10877
10878 if (bp->regview)
10879 iounmap(bp->regview);
10880
10881 if (bp->doorbells)
10882 iounmap(bp->doorbells);
10883
10884 free_netdev(dev);
34f80b04
EG
10885
10886 if (atomic_read(&pdev->enable_cnt) == 1)
10887 pci_release_regions(pdev);
10888
a2fbb9ea
ET
10889 pci_disable_device(pdev);
10890 pci_set_drvdata(pdev, NULL);
10891}
10892
10893static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10894{
10895 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10896 struct bnx2x *bp;
10897
34f80b04
EG
10898 if (!dev) {
10899 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10900 return -ENODEV;
10901 }
10902 bp = netdev_priv(dev);
a2fbb9ea 10903
34f80b04 10904 rtnl_lock();
a2fbb9ea 10905
34f80b04 10906 pci_save_state(pdev);
228241eb 10907
34f80b04
EG
10908 if (!netif_running(dev)) {
10909 rtnl_unlock();
10910 return 0;
10911 }
a2fbb9ea
ET
10912
10913 netif_device_detach(dev);
a2fbb9ea 10914
da5a662a 10915 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10916
a2fbb9ea 10917 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10918
34f80b04
EG
10919 rtnl_unlock();
10920
a2fbb9ea
ET
10921 return 0;
10922}
10923
10924static int bnx2x_resume(struct pci_dev *pdev)
10925{
10926 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10927 struct bnx2x *bp;
a2fbb9ea
ET
10928 int rc;
10929
228241eb
ET
10930 if (!dev) {
10931 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10932 return -ENODEV;
10933 }
228241eb 10934 bp = netdev_priv(dev);
a2fbb9ea 10935
34f80b04
EG
10936 rtnl_lock();
10937
228241eb 10938 pci_restore_state(pdev);
34f80b04
EG
10939
10940 if (!netif_running(dev)) {
10941 rtnl_unlock();
10942 return 0;
10943 }
10944
a2fbb9ea
ET
10945 bnx2x_set_power_state(bp, PCI_D0);
10946 netif_device_attach(dev);
10947
da5a662a 10948 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10949
34f80b04
EG
10950 rtnl_unlock();
10951
10952 return rc;
a2fbb9ea
ET
10953}
10954
f8ef6e44
YG
10955static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10956{
10957 int i;
10958
10959 bp->state = BNX2X_STATE_ERROR;
10960
10961 bp->rx_mode = BNX2X_RX_MODE_NONE;
10962
10963 bnx2x_netif_stop(bp, 0);
10964
10965 del_timer_sync(&bp->timer);
10966 bp->stats_state = STATS_STATE_DISABLED;
10967 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10968
10969 /* Release IRQs */
10970 bnx2x_free_irq(bp);
10971
10972 if (CHIP_IS_E1(bp)) {
10973 struct mac_configuration_cmd *config =
10974 bnx2x_sp(bp, mcast_config);
10975
8d9c5f34 10976 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
10977 CAM_INVALIDATE(config->config_table[i]);
10978 }
10979
10980 /* Free SKBs, SGEs, TPA pool and driver internals */
10981 bnx2x_free_skbs(bp);
555f6c78 10982 for_each_rx_queue(bp, i)
f8ef6e44 10983 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 10984 for_each_rx_queue(bp, i)
7cde1c8b 10985 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
10986 bnx2x_free_mem(bp);
10987
10988 bp->state = BNX2X_STATE_CLOSED;
10989
10990 netif_carrier_off(bp->dev);
10991
10992 return 0;
10993}
10994
10995static void bnx2x_eeh_recover(struct bnx2x *bp)
10996{
10997 u32 val;
10998
10999 mutex_init(&bp->port.phy_mutex);
11000
11001 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
11002 bp->link_params.shmem_base = bp->common.shmem_base;
11003 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
11004
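	/* the shmem pointer must fall inside the expected address
	 * window; anything else means the bootcode (MCP) is not running
	 */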
11005 if (!bp->common.shmem_base ||
11006 (bp->common.shmem_base < 0xA0000) ||
11007 (bp->common.shmem_base >= 0xC0000)) {
11008 BNX2X_DEV_INFO("MCP not active\n");
11009 bp->flags |= NO_MCP_FLAG;
11010 return;
11011 }
11012
11013 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11014 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11015 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11016 BNX2X_ERR("BAD MCP validity signature\n");
11017
11018 if (!BP_NOMCP(bp)) {
11019 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11020 & DRV_MSG_SEQ_NUMBER_MASK);
11021 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11022 }
11023}
11024
493adb1f
WX
11025/**
11026 * bnx2x_io_error_detected - called when PCI error is detected
11027 * @pdev: Pointer to PCI device
11028 * @state: The current pci connection state
11029 *
11030 * This function is called after a PCI bus error affecting
11031 * this device has been detected.
11032 */
11033static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11034 pci_channel_state_t state)
11035{
11036 struct net_device *dev = pci_get_drvdata(pdev);
11037 struct bnx2x *bp = netdev_priv(dev);
11038
11039 rtnl_lock();
11040
11041 netif_device_detach(dev);
11042
11043 if (netif_running(dev))
f8ef6e44 11044 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11045
11046 pci_disable_device(pdev);
11047
11048 rtnl_unlock();
11049
11050 /* Request a slot reset */
11051 return PCI_ERS_RESULT_NEED_RESET;
11052}
11053
11054/**
11055 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11056 * @pdev: Pointer to PCI device
11057 *
11058 * Restart the card from scratch, as if from a cold-boot.
11059 */
11060static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11061{
11062 struct net_device *dev = pci_get_drvdata(pdev);
11063 struct bnx2x *bp = netdev_priv(dev);
11064
11065 rtnl_lock();
11066
11067 if (pci_enable_device(pdev)) {
11068 dev_err(&pdev->dev,
11069 "Cannot re-enable PCI device after reset\n");
11070 rtnl_unlock();
11071 return PCI_ERS_RESULT_DISCONNECT;
11072 }
11073
11074 pci_set_master(pdev);
11075 pci_restore_state(pdev);
11076
11077 if (netif_running(dev))
11078 bnx2x_set_power_state(bp, PCI_D0);
11079
11080 rtnl_unlock();
11081
11082 return PCI_ERS_RESULT_RECOVERED;
11083}
11084
11085/**
11086 * bnx2x_io_resume - called when traffic can start flowing again
11087 * @pdev: Pointer to PCI device
11088 *
11089 * This callback is called when the error recovery driver tells us that
11090 * its OK to resume normal operation.
11091 */
11092static void bnx2x_io_resume(struct pci_dev *pdev)
11093{
11094 struct net_device *dev = pci_get_drvdata(pdev);
11095 struct bnx2x *bp = netdev_priv(dev);
11096
11097 rtnl_lock();
11098
f8ef6e44
YG
11099 bnx2x_eeh_recover(bp);
11100
493adb1f 11101 if (netif_running(dev))
f8ef6e44 11102 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11103
11104 netif_device_attach(dev);
11105
11106 rtnl_unlock();
11107}
11108
11109static struct pci_error_handlers bnx2x_err_handler = {
11110 .error_detected = bnx2x_io_error_detected,
11111 .slot_reset = bnx2x_io_slot_reset,
11112 .resume = bnx2x_io_resume,
11113};
11114
a2fbb9ea 11115static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11116 .name = DRV_MODULE_NAME,
11117 .id_table = bnx2x_pci_tbl,
11118 .probe = bnx2x_init_one,
11119 .remove = __devexit_p(bnx2x_remove_one),
11120 .suspend = bnx2x_suspend,
11121 .resume = bnx2x_resume,
11122 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11123};
11124
11125static int __init bnx2x_init(void)
11126{
1cf167f2
EG
11127 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11128 if (bnx2x_wq == NULL) {
11129 printk(KERN_ERR PFX "Cannot create workqueue\n");
11130 return -ENOMEM;
11131 }
11132
a2fbb9ea
ET
11133 return pci_register_driver(&bnx2x_pci_driver);
11134}
11135
11136static void __exit bnx2x_cleanup(void)
11137{
11138 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11139
11140 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11141}
11142
11143module_init(bnx2x_init);
11144module_exit(bnx2x_cleanup);
11145