/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");
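/*
 * Hedged usage sketch (not part of the driver source): the parameters
 * above are supplied at module load time, e.g.
 *
 *	modprobe bnx2x int_mode=1 disable_tpa=1 multi_mode=0
 *
 * which would force INT#x interrupts, turn off TPA (LRO) aggregation and
 * disable multi-queue mode. The parameter names come from the
 * module_param() calls above; the values are only illustrative.
 */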
static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
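/*
 * Hedged sketch (not in the original driver): the two helpers above tunnel
 * GRC register accesses through the PCICFG_GRC_ADDRESS/DATA config-space
 * window, so a read-modify-write of an indirectly mapped register composes
 * naturally from them. bnx2x_reg_set_bits_ind is a hypothetical name used
 * only for illustration.
 */
static inline void bnx2x_reg_set_bits_ind(struct bnx2x *bp, u32 addr, u32 bits)
{
	u32 val = bnx2x_reg_rd_ind(bp, addr);

	bnx2x_reg_wr_ind(bp, addr, val | bits);
}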
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
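/*
 * Hedged sketch (hypothetical helper, not driver API): wide-bus registers
 * take their 64-bit value as a hi/lo dword pair, so a caller holding a
 * plain u64 could split it with the U64_HI/U64_LO macros used elsewhere
 * in this file.
 */
static inline void bnx2x_wb_wr64(struct bnx2x *bp, int reg, u64 val)
{
	bnx2x_wb_wr(bp, reg, U64_HI(val), U64_LO(val));
}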
#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}
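/*
 * Hedged usage sketch (illustrative only): the MSI-X handler below acks
 * with IGU_INT_DISABLE before scheduling NAPI; the matching re-enable
 * after a poll would look roughly like this, assuming the IGU_INT_ENABLE
 * opcode from the HSI headers. bnx2x_example_reenable_sb is a
 * hypothetical name.
 */
static inline void bnx2x_example_reenable_sb(struct bnx2x *bp,
					     struct bnx2x_fastpath *fp)
{
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, fp->fp_u_idx,
		     IGU_INT_ENABLE, 1);
}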
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
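/*
 * Hedged sketch (hypothetical caller, not driver code): the return value
 * above is a bitmask - bit 0 set means the CSTORM index moved, bit 1 the
 * USTORM index - so a poller can tell which half of the status block
 * changed. Note the call also caches the new indices in fp.
 */
static inline int bnx2x_example_sb_changed(struct bnx2x_fastpath *fp)
{
	u16 rc = bnx2x_update_fpsb_idx(fp);

	/* rc & 1: CSTORM status block index moved
	 * rc & 2: USTORM status block index moved
	 */
	return (rc != 0);
}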
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
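/*
 * Hedged sketch (hypothetical helper): a worst-case frame can consume up
 * to MAX_SKB_FRAGS + 3 BDs (its data frags plus a few control BDs such as
 * the parse and TSO split BDs), which is exactly the headroom
 * bnx2x_tx_int() below checks before waking a stopped queue.
 */
static inline int bnx2x_example_tx_has_room(struct bnx2x_fastpath *fp)
{
	return (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3);
}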
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped(). Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
a2fbb9ea
ET
918static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
919 union eth_rx_cqe *rr_cqe)
920{
921 struct bnx2x *bp = fp->bp;
922 int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
923 int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);
924
34f80b04 925 DP(BNX2X_MSG_SP,
a2fbb9ea 926 "fp %d cid %d got ramrod #%d state is %x type is %d\n",
34f80b04
EG
927 FP_IDX(fp), cid, command, bp->state,
928 rr_cqe->ramrod_cqe.ramrod_type);
a2fbb9ea
ET
929
930 bp->spq_left++;
931
34f80b04 932 if (FP_IDX(fp)) {
a2fbb9ea
ET
933 switch (command | fp->state) {
934 case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
935 BNX2X_FP_STATE_OPENING):
936 DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
937 cid);
938 fp->state = BNX2X_FP_STATE_OPEN;
939 break;
940
941 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
942 DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
943 cid);
944 fp->state = BNX2X_FP_STATE_HALTED;
945 break;
946
947 default:
34f80b04
EG
948 BNX2X_ERR("unexpected MC reply (%d) "
949 "fp->state is %x\n", command, fp->state);
950 break;
a2fbb9ea 951 }
34f80b04 952 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
953 return;
954 }
c14423fe 955
a2fbb9ea
ET
956 switch (command | bp->state) {
957 case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
958 DP(NETIF_MSG_IFUP, "got setup ramrod\n");
959 bp->state = BNX2X_STATE_OPEN;
960 break;
961
962 case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
963 DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
964 bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
965 fp->state = BNX2X_FP_STATE_HALTED;
966 break;
967
a2fbb9ea 968 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 969 DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
49d66772 970 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
a2fbb9ea
ET
971 break;
972
3196a88a 973
a2fbb9ea 974 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
34f80b04 975 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
a2fbb9ea 976 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
bb2a0f7a 977 bp->set_mac_pending = 0;
a2fbb9ea
ET
978 break;
979
49d66772 980 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
34f80b04 981 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
49d66772
ET
982 break;
983
a2fbb9ea 984 default:
34f80b04 985 BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
a2fbb9ea 986 command, bp->state);
34f80b04 987 break;
a2fbb9ea 988 }
34f80b04 989 mb(); /* force bnx2x_wait_ramrod() to see the change */
a2fbb9ea
ET
990}
991
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe,
						       comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
4a37fb66 1738static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1739{
1740 u32 lock_status;
1741 u32 resource_bit = (1 << resource);
4a37fb66
YG
1742 int func = BP_FUNC(bp);
1743 u32 hw_lock_control_reg;
c18487ee 1744 int cnt;
a2fbb9ea 1745
c18487ee
YR
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 DP(NETIF_MSG_HW,
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 return -EINVAL;
1752 }
a2fbb9ea 1753
4a37fb66
YG
1754 if (func <= 5) {
1755 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 } else {
1757 hw_lock_control_reg =
1758 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1759 }
1760
c18487ee 1761 /* Validating that the resource is not already taken */
4a37fb66 1762 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1763 if (lock_status & resource_bit) {
1764 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1765 lock_status, resource_bit);
1766 return -EEXIST;
1767 }
a2fbb9ea 1768
46230476
EG
1769 /* Try for 5 second every 5ms */
1770 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1771 /* Try to acquire the lock */
4a37fb66
YG
1772 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1774 if (lock_status & resource_bit)
1775 return 0;
a2fbb9ea 1776
c18487ee 1777 msleep(5);
a2fbb9ea 1778 }
c18487ee
YR
1779 DP(NETIF_MSG_HW, "Timeout\n");
1780 return -EAGAIN;
1781}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
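
/*
 * Usage sketch (added comment): every lock taken with
 * bnx2x_acquire_hw_lock() must be paired with bnx2x_release_hw_lock() on
 * the same resource, as bnx2x_set_gpio() below does:
 *
 *	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	... modify MISC_REG_GPIO ...
 *	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 */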

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
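
/*
 * Added comment: the switch above drives three bit groups per GPIO pin in
 * MISC_REG_GPIO - FLOAT (tri-state), SET (drive high) and CLR (drive low).
 * Output modes clear the pin's FLOAT bit and raise SET or CLR; input mode
 * raises FLOAT.  The fan-failure handler later in this file uses
 * bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, MISC_REGISTERS_GPIO_OUTPUT_LOW,
 * port) to hold the SFX7101 PHY in reset.
 */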

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
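
/*
 * Added comment: the mapping above mirrors the IEEE 802.3 pause
 * advertisement into ethtool terms -
 *	PAUSE_NONE       -> neither ADVERTISED_Pause nor ADVERTISED_Asym_Pause
 *	PAUSE_BOTH       -> ADVERTISED_Pause | ADVERTISED_Asym_Pause
 *	PAUSE_ASYMMETRIC -> ADVERTISED_Asym_Pause only
 */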

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur;
	   the 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000 usec, for 1G it is 10000 usec */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec; we don't want the credits
	   to pass a credit of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
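
/*
 * Worked example (added comment, values taken from the comments above):
 * at line_speed = 10000 (10G) the fairness period is
 *	t_fair = T_FAIR_COEF / 10000 = 1000 usec
 * while at line_speed = 1000 (1G) it is 10000 usec, so a slower link is
 * rebalanced ten times less often; rs_periodic_timeout stays fixed at
 * RS_PERIODIC_TIMEOUT_USEC / 4 SDM ticks either way.
 */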

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   the current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
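
/*
 * Worked example (added comment): vn_max_rate is in Mbps, i.e. bits per
 * usec, so with vn_max_rate = 10000 and the 100 usec period noted in
 * bnx2x_init_port_minmax() the quota computed above is
 *	(10000 * 100) / 8 = 125000 bytes
 * per rate-shaping period - exactly a sustained 10 Gbps.
 */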

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs the port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
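
/*
 * Usage sketch (added comment): callers post a slowpath element and pick
 * up its completion on the fastpath ring, e.g. the statistics query later
 * in this file:
 *
 *	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
 *			   ((u32 *)&ramrod_data)[1],
 *			   ((u32 *)&ramrod_data)[0], 0);
 *
 * where data_hi/data_lo carry the two halves of the ramrod payload.
 */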

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
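
/*
 * Added comment: the return value above is a bitmask of which default
 * status block sections advanced - bit 0 attention bits, bit 1 CSTORM,
 * bit 2 USTORM, bit 3 XSTORM, bit 4 TSTORM.  bnx2x_sp_task() below only
 * dispatches on bit 0 (HW attentions) and acks the rest.
 */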

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shut down the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
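
/*
 * Worked example (added comment, hypothetical values): with
 * attn_bits = 0x5, attn_ack = 0x4 and attn_state = 0x4 the logic above
 * yields
 *	asserted   =  0x5 & ~0x4 & ~0x4 = 0x1
 *	deasserted = ~0x5 &  0x4 &  0x4 = 0x0
 * i.e. attention line 0 was newly raised and nothing was lowered.
 */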

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can borrow 1 from the high word */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
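
/*
 * Worked example (added comment): the chip exports 64-bit counters as two
 * u32 halves, so the macros emulate carry/borrow by hand.  ADD_64 on
 * s = 0x00000000_FFFFFFFF with a = 0x00000000_00000001 gives
 *	s_lo = 0 (wrapped, s_lo < a_lo, so carry) and s_hi = 1,
 * i.e. 0x00000001_00000000.  DIFF_64 borrows the same way and clamps the
 * result to zero when the subtrahend is actually the larger value.
 */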

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)
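
/*
 * Added comment: UPDATE_STAT64(s, t) keeps two copies per counter -
 * mac_stx[0] holds the last raw MAC register snapshot of field s, while
 * mac_stx[1] accumulates the running total t across MAC resets by adding
 * only the DIFF_64 delta.
 */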

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
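
/*
 * Added comment: on 32-bit kernels bnx2x_hilo() deliberately returns only
 * the low 32 bits, since a long cannot hold the full 64-bit counter; on
 * 64-bit kernels HILO_U64() reassembles both halves.
 */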

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats to finish\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}
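
/*
 * Added comment: the DMAE completion protocol is a simple mailbox - the
 * poster zeroes stats_comp, the DMAE engine writes DMAE_COMP_VAL when the
 * last descriptor completes, and bnx2x_stats_comp() polls that word for
 * roughly 10 ms before giving up.
 */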

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
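
/*
 * Added comment: in the chain built above, every descriptor except the
 * last completes to a GRC "go" register (comp_val = 1) so the hardware
 * keeps the sequence moving; only the final descriptor completes to host
 * memory with DMAE_COMP_VAL, the value bnx2x_stats_comp() polls for.
 */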

static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

3434static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3435{
3436 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3437 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
de832a55 3438 struct bnx2x_eth_stats *estats = &bp->eth_stats;
bb2a0f7a
YG
3439 struct regpair diff;
3440
3441 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3442 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3443 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3444 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3445 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3446 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
66e855f3 3447 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
bb2a0f7a 3448 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
de832a55 3449 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
bb2a0f7a
YG
3450 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3451 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3452 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3453 UPDATE_STAT64(tx_stat_gt127,
3454 tx_stat_etherstatspkts65octetsto127octets);
3455 UPDATE_STAT64(tx_stat_gt255,
3456 tx_stat_etherstatspkts128octetsto255octets);
3457 UPDATE_STAT64(tx_stat_gt511,
3458 tx_stat_etherstatspkts256octetsto511octets);
3459 UPDATE_STAT64(tx_stat_gt1023,
3460 tx_stat_etherstatspkts512octetsto1023octets);
3461 UPDATE_STAT64(tx_stat_gt1518,
3462 tx_stat_etherstatspkts1024octetsto1522octets);
3463 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3464 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3465 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3466 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3467 UPDATE_STAT64(tx_stat_gterr,
3468 tx_stat_dot3statsinternalmactransmiterrors);
3469 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
de832a55
EG
3470
3471 estats->pause_frames_received_hi =
3472 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3473 estats->pause_frames_received_lo =
3474 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3475
3476 estats->pause_frames_sent_hi =
3477 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3478 estats->pause_frames_sent_lo =
3479 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
bb2a0f7a
YG
3480}
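
/*
 * Note: UPDATE_STAT64 (a macro defined elsewhere in this driver) folds
 * the BMAC's wrapping hardware counters into the running 64-bit hi/lo
 * pairs in pstats->mac_stx[1]; the seemingly unused 'diff' local above
 * is scratch space used inside that macro.
 */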

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}
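
/*
 * Note: host_port_stats_start/host_port_stats_end evidently bracket each
 * snapshot - bumping 'end' and copying it into 'start' marks the block
 * consistent, so a consumer seeing start != end can tell it has read a
 * partially updated snapshot.
 */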

static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   " xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   " tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   " ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}
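
/*
 * Note: the validity checks above use a lockstep counter - each storm
 * posts its own copy of stats_counter with a completed batch, and the
 * driver consumes the batch only when every storm's counter + 1 equals
 * bp->stats_counter.  The distinct return codes (-1/-2/-4) identify
 * which storm was lagging.
 */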

static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated for 3 times\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
				  " tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
				  " rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u "
			"packets_too_big_discard %lu no_buff_discard %lu "
			"mac_discard %u mac_filter_discard %u "
			"xxovrflow_discard %u brb_truncate_discard %u "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard, estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};
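
/*
 * Note, a worked example of the table above: starting in
 * STATS_STATE_DISABLED, a STATS_EVENT_LINK_UP runs bnx2x_stats_start()
 * and moves to STATS_STATE_ENABLED; periodic STATS_EVENT_UPDATEs then
 * run bnx2x_stats_update() and stay in ENABLED, until a STATS_EVENT_STOP
 * runs bnx2x_stats_stop() and drops back to DISABLED.
 */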

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}
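
/*
 * Note: the same pattern repeats for each storm owning part of a status
 * block - program the DMA address of its section (low then high 32 bits),
 * record the owning function, and write 1 to every HC_DISABLE index so
 * coalescing stays off until bnx2x_update_coalesce() configures it.  The
 * final bnx2x_ack_sb() enables the IGU interrupt for this status block.
 */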

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						     reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}
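
/*
 * Note: bp->rx_ticks/bp->tx_ticks are the coalescing intervals in
 * microseconds; the division by 12 converts them to the units of the
 * hardware timeout field (presumably 12 usec per unit).  A value of 0
 * disables coalescing on that index via the HC_DISABLE flag instead.
 */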

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * this will generate an interrupt (to the TSTORM)
		 * must only be done after chip is initialized
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}
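
/*
 * Note on the "next page" loops above: the tail of each ring page is
 * reserved for a pointer to the DMA address of the following page (the
 * last two BD/SGE slots, or the last CQE slot for the completion ring),
 * and the "% NUM_*" arithmetic makes the last page point back to the
 * first, closing the ring.
 */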

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = FP_SB_ID(fp);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_rx_queues));
}
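
/*
 * Note: the indirection table spreads the RSS hash buckets round-robin
 * over the rx queues' client IDs.  For example (assumed numbers), with
 * num_rx_queues == 4 and a base client ID of 0, buckets 0,1,2,3,4,5,...
 * map to clients 0,1,2,3,0,1,...
 */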

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}
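
/*
 * Note, summarizing the switch above:
 *	NONE     - drop all unicast/multicast/broadcast
 *	NORMAL   - accept all broadcast
 *	ALLMULTI - accept all multicast and broadcast
 *	PROMISC  - accept everything
 * In NORMAL mode unicast and multicast presumably pass through the
 * regular MAC/multicast filters, which are configured elsewhere.
 */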

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}
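
/*
 * Note, with assumed numbers: if two visible VNs carry min BW fields of
 * 10 and 30, vn_min_rate evaluates to 1000 and 3000 and vn_weight_sum
 * ends up 4000.  If every VN reports 0, each is counted as DEF_MIN_RATE
 * while summing, but the final sum is forced back to 0, which
 * bnx2x_init_internal_func() uses to switch the fairness algorithm off.
 */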
4793
471de716 4794static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4795{
a2fbb9ea
ET
4796 struct tstorm_eth_function_common_config tstorm_config = {0};
4797 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4798 int port = BP_PORT(bp);
4799 int func = BP_FUNC(bp);
de832a55
EG
4800 int i, j;
4801 u32 offset;
471de716 4802 u16 max_agg_size;
a2fbb9ea
ET
4803
4804 if (is_multi(bp)) {
555f6c78 4805 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4806 tstorm_config.rss_result_mask = MULTI_MASK;
4807 }
8d9c5f34
EG
4808 if (IS_E1HMF(bp))
4809 tstorm_config.config_flags |=
4810 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4811
34f80b04
EG
4812 tstorm_config.leading_client_id = BP_L_ID(bp);
4813
a2fbb9ea 4814 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4815 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4816 (*(u32 *)&tstorm_config));
4817
c14423fe 4818 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4819 bnx2x_set_storm_rx_mode(bp);
4820
de832a55
EG
4821 for_each_queue(bp, i) {
4822 u8 cl_id = bp->fp[i].cl_id;
4823
4824 /* reset xstorm per client statistics */
4825 offset = BAR_XSTRORM_INTMEM +
4826 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4827 for (j = 0;
4828 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4829 REG_WR(bp, offset + j*4, 0);
4830
4831 /* reset tstorm per client statistics */
4832 offset = BAR_TSTRORM_INTMEM +
4833 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4834 for (j = 0;
4835 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4836 REG_WR(bp, offset + j*4, 0);
4837
4838 /* reset ustorm per client statistics */
4839 offset = BAR_USTRORM_INTMEM +
4840 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4841 for (j = 0;
4842 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4843 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4844 }
4845
4846 /* Init statistics related context */
34f80b04 4847 stats_flags.collect_eth = 1;
a2fbb9ea 4848
66e855f3 4849 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4850 ((u32 *)&stats_flags)[0]);
66e855f3 4851 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4852 ((u32 *)&stats_flags)[1]);
4853
66e855f3 4854 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4855 ((u32 *)&stats_flags)[0]);
66e855f3 4856 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4857 ((u32 *)&stats_flags)[1]);
4858
de832a55
EG
4859 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4860 ((u32 *)&stats_flags)[0]);
4861 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4862 ((u32 *)&stats_flags)[1]);
4863
66e855f3 4864 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4865 ((u32 *)&stats_flags)[0]);
66e855f3 4866 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4867 ((u32 *)&stats_flags)[1]);
4868
66e855f3
YG
4869 REG_WR(bp, BAR_XSTRORM_INTMEM +
4870 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4871 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4872 REG_WR(bp, BAR_XSTRORM_INTMEM +
4873 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4874 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4875
4876 REG_WR(bp, BAR_TSTRORM_INTMEM +
4877 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4878 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4879 REG_WR(bp, BAR_TSTRORM_INTMEM +
4880 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4881 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4882
de832a55
EG
4883 REG_WR(bp, BAR_USTRORM_INTMEM +
4884 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4885 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4886 REG_WR(bp, BAR_USTRORM_INTMEM +
4887 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4888 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4889
34f80b04
EG
4890 if (CHIP_IS_E1H(bp)) {
4891 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4892 IS_E1HMF(bp));
4893 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4894 IS_E1HMF(bp));
4895 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4896 IS_E1HMF(bp));
4897 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4898 IS_E1HMF(bp));
4899
7a9b2557
VZ
4900 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4901 bp->e1hov);
34f80b04
EG
4902 }
4903
4f40f2cb
EG
4904 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4905 max_agg_size =
4906 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4907 SGE_PAGE_SIZE * PAGES_PER_SGE),
4908 (u32)0xffff);
555f6c78 4909 for_each_rx_queue(bp, i) {
7a9b2557 4910 struct bnx2x_fastpath *fp = &bp->fp[i];
7a9b2557
VZ
4911
4912 REG_WR(bp, BAR_USTRORM_INTMEM +
4913 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4914 U64_LO(fp->rx_comp_mapping));
4915 REG_WR(bp, BAR_USTRORM_INTMEM +
4916 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4917 U64_HI(fp->rx_comp_mapping));
4918
7a9b2557
VZ
4919 REG_WR16(bp, BAR_USTRORM_INTMEM +
4920 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4921 max_agg_size);
4922 }
8a1c38d1 4923
1c06328c
EG
4924 /* dropless flow control */
4925 if (CHIP_IS_E1H(bp)) {
4926 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
4927
4928 rx_pause.bd_thr_low = 250;
4929 rx_pause.cqe_thr_low = 250;
4930 rx_pause.cos = 1;
4931 rx_pause.sge_thr_low = 0;
4932 rx_pause.bd_thr_high = 350;
4933 rx_pause.cqe_thr_high = 350;
4934 rx_pause.sge_thr_high = 0;
4935
4936 for_each_rx_queue(bp, i) {
4937 struct bnx2x_fastpath *fp = &bp->fp[i];
4938
4939 if (!fp->disable_tpa) {
4940 rx_pause.sge_thr_low = 150;
4941 rx_pause.sge_thr_high = 250;
4942 }
4943
4944
4945 offset = BAR_USTRORM_INTMEM +
4946 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
4947 fp->cl_id);
4948 for (j = 0;
4949 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
4950 j++)
4951 REG_WR(bp, offset + j*4,
4952 ((u32 *)&rx_pause)[j]);
4953 }
4954 }
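/* Note on the thresholds above: bd/cqe/sge_thr_low and *_thr_high look
 * like a low/high watermark pair per ring -- presumably the FW asserts
 * pause when free entries drop below the low mark and releases it above
 * the high mark, with the SGE pair armed only when TPA is enabled.
 * This is inferred from the field names, not from FW documentation.
 */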
4955
4956 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
4957
4958 /* Init rate shaping and fairness contexts */
4959 if (IS_E1HMF(bp)) {
4960 int vn;
4961
4962 /* During init there is no active link;
4963 until link is up, set the link rate to 10Gbps */
4964 bp->link_vars.line_speed = SPEED_10000;
4965 bnx2x_init_port_minmax(bp);
4966
4967 bnx2x_calc_vn_weight_sum(bp);
4968
4969 for (vn = VN_0; vn < E1HVN_MAX; vn++)
4970 bnx2x_init_vn_minmax(bp, 2*vn + port);
4971
4972 /* Enable rate shaping and fairness */
4973 bp->cmng.flags.cmng_enables =
4974 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
4975 if (bp->vn_weight_sum)
4976 bp->cmng.flags.cmng_enables |=
4977 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
4978 else
4979 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
4980 " fairness will be disabled\n");
4981 } else {
4982 /* rate shaping and fairness are disabled */
4983 DP(NETIF_MSG_IFUP,
4984 "single function mode minmax will be disabled\n");
4985 }
4986
4987
4988 /* Store it to internal memory */
4989 if (bp->port.pmf)
4990 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
4991 REG_WR(bp, BAR_XSTRORM_INTMEM +
4992 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
4993 ((u32 *)(&bp->cmng))[i]);
4994}
4995
4996static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4997{
4998 switch (load_code) {
4999 case FW_MSG_CODE_DRV_LOAD_COMMON:
5000 bnx2x_init_internal_common(bp);
5001 /* no break */
5002
5003 case FW_MSG_CODE_DRV_LOAD_PORT:
5004 bnx2x_init_internal_port(bp);
5005 /* no break */
5006
5007 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5008 bnx2x_init_internal_func(bp);
5009 break;
5010
5011 default:
5012 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5013 break;
5014 }
5015}
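/* Note: the cases above cascade on purpose (hence the "no break"
 * comments): a COMMON load runs common + port + function init, a PORT
 * load runs port + function init, and a FUNCTION load runs only the
 * per-function init.
 */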
5016
5017static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5018{
5019 int i;
5020
5021 for_each_queue(bp, i) {
5022 struct bnx2x_fastpath *fp = &bp->fp[i];
5023
34f80b04 5024 fp->bp = bp;
a2fbb9ea 5025 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5026 fp->index = i;
5027 fp->cl_id = BP_L_ID(bp) + i;
5028 fp->sb_id = fp->cl_id;
5029 DP(NETIF_MSG_IFUP,
5030 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5031 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5032 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5033 FP_SB_ID(fp));
5034 bnx2x_update_fpsb_idx(fp);
5035 }
5036
5037 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5038 DEF_SB_ID);
5039 bnx2x_update_dsb_idx(bp);
5040 bnx2x_update_coalesce(bp);
5041 bnx2x_init_rx_rings(bp);
5042 bnx2x_init_tx_ring(bp);
5043 bnx2x_init_sp_ring(bp);
5044 bnx2x_init_context(bp);
471de716 5045 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5046 bnx2x_init_ind_table(bp);
5047 bnx2x_stats_init(bp);
5048
5049 /* At this point, we are ready for interrupts */
5050 atomic_set(&bp->intr_sem, 0);
5051
5052 /* flush all before enabling interrupts */
5053 mb();
5054 mmiowb();
5055
615f8fd9 5056 bnx2x_int_enable(bp);
5057}
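/* Note on ordering in bnx2x_nic_init(): status blocks, rings, contexts
 * and internal memory are all set up first; only after the mb()/mmiowb()
 * barriers have flushed those writes is intr_sem dropped to 0 and the
 * IGU allowed to deliver interrupts via bnx2x_int_enable().
 */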
5058
5059/* end of nic init */
5060
5061/*
5062 * gzip service functions
5063 */
5064
5065static int bnx2x_gunzip_init(struct bnx2x *bp)
5066{
5067 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5068 &bp->gunzip_mapping);
5069 if (bp->gunzip_buf == NULL)
5070 goto gunzip_nomem1;
5071
5072 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5073 if (bp->strm == NULL)
5074 goto gunzip_nomem2;
5075
5076 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5077 GFP_KERNEL);
5078 if (bp->strm->workspace == NULL)
5079 goto gunzip_nomem3;
5080
5081 return 0;
5082
5083gunzip_nomem3:
5084 kfree(bp->strm);
5085 bp->strm = NULL;
5086
5087gunzip_nomem2:
5088 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5089 bp->gunzip_mapping);
5090 bp->gunzip_buf = NULL;
5091
5092gunzip_nomem1:
5093 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5094 " decompression\n", bp->dev->name);
5095 return -ENOMEM;
5096}
5097
5098static void bnx2x_gunzip_end(struct bnx2x *bp)
5099{
5100 kfree(bp->strm->workspace);
5101
5102 kfree(bp->strm);
5103 bp->strm = NULL;
5104
5105 if (bp->gunzip_buf) {
5106 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5107 bp->gunzip_mapping);
5108 bp->gunzip_buf = NULL;
5109 }
5110}
5111
5112static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5113{
5114 int n, rc;
5115
5116 /* check gzip header */
5117 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5118 return -EINVAL;
5119
5120 n = 10;
5121
34f80b04 5122#define FNAME 0x8
5123
5124 if (zbuf[3] & FNAME)
5125 while ((zbuf[n++] != 0) && (n < len));
5126
5127 bp->strm->next_in = zbuf + n;
5128 bp->strm->avail_in = len - n;
5129 bp->strm->next_out = bp->gunzip_buf;
5130 bp->strm->avail_out = FW_BUF_SIZE;
5131
5132 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5133 if (rc != Z_OK)
5134 return rc;
5135
5136 rc = zlib_inflate(bp->strm, Z_FINISH);
5137 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5138 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5139 bp->dev->name, bp->strm->msg);
5140
5141 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5142 if (bp->gunzip_outlen & 0x3)
5143 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5144 " gunzip_outlen (%d) not aligned\n",
5145 bp->dev->name, bp->gunzip_outlen);
5146 bp->gunzip_outlen >>= 2;
5147
5148 zlib_inflateEnd(bp->strm);
5149
5150 if (rc == Z_STREAM_END)
5151 return 0;
5152
5153 return rc;
5154}
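/* For reference, the gzip framing handled above (RFC 1952): bytes 0-1
 * are the magic 0x1f 0x8b, byte 2 is the compression method (8 ==
 * Z_DEFLATED), byte 3 holds the flags and the fixed header is 10 bytes;
 * when FNAME (0x08) is set, a NUL-terminated file name follows the
 * fixed header. Passing -MAX_WBITS to zlib_inflateInit2() selects a
 * raw deflate stream, which is why the header is skipped by hand.
 */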
5155
5156/* nic load/unload */
5157
5158/*
34f80b04 5159 * General service functions
5160 */
5161
5162/* send a NIG loopback debug packet */
5163static void bnx2x_lb_pckt(struct bnx2x *bp)
5164{
a2fbb9ea 5165 u32 wb_write[3];
5166
5167 /* Ethernet source and destination addresses */
5168 wb_write[0] = 0x55555555;
5169 wb_write[1] = 0x55555555;
34f80b04 5170 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5171 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5172
5173 /* NON-IP protocol */
5174 wb_write[0] = 0x09000000;
5175 wb_write[1] = 0x55555555;
34f80b04 5176 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5177 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5178}
5179
5180/* some of the internal memories
5181 * are not directly readable from the driver;
5182 * to test them we send debug packets
5183 */
5184static int bnx2x_int_mem_test(struct bnx2x *bp)
5185{
5186 int factor;
5187 int count, i;
5188 u32 val = 0;
5189
ad8d3948 5190 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5191 factor = 120;
5192 else if (CHIP_REV_IS_EMUL(bp))
5193 factor = 200;
5194 else
a2fbb9ea 5195 factor = 1;
5196
5197 DP(NETIF_MSG_HW, "start part1\n");
5198
5199 /* Disable inputs of parser neighbor blocks */
5200 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5201 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5202 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5203 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5204
5205 /* Write 0 to parser credits for CFC search request */
5206 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5207
5208 /* send Ethernet packet */
5209 bnx2x_lb_pckt(bp);
5210
5211 /* TODO: do I reset the NIG statistics? */
5212 /* Wait until NIG register shows 1 packet of size 0x10 */
5213 count = 1000 * factor;
5214 while (count) {
34f80b04 5215
5216 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5217 val = *bnx2x_sp(bp, wb_data[0]);
5218 if (val == 0x10)
5219 break;
5220
5221 msleep(10);
5222 count--;
5223 }
5224 if (val != 0x10) {
5225 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5226 return -1;
5227 }
5228
5229 /* Wait until PRS register shows 1 packet */
5230 count = 1000 * factor;
5231 while (count) {
5232 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5233 if (val == 1)
5234 break;
5235
5236 msleep(10);
5237 count--;
5238 }
5239 if (val != 0x1) {
5240 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5241 return -2;
5242 }
5243
5244 /* Reset and init BRB, PRS */
34f80b04 5245 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5246 msleep(50);
34f80b04 5247 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5248 msleep(50);
5249 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5250 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5251
5252 DP(NETIF_MSG_HW, "part2\n");
5253
5254 /* Disable inputs of parser neighbor blocks */
5255 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5256 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5257 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5258 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5259
5260 /* Write 0 to parser credits for CFC search request */
5261 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5262
5263 /* send 10 Ethernet packets */
5264 for (i = 0; i < 10; i++)
5265 bnx2x_lb_pckt(bp);
5266
5267 /* Wait until NIG register shows 10 + 1
5268 packets of size 11*0x10 = 0xb0 */
5269 count = 1000 * factor;
5270 while (count) {
34f80b04 5271
5272 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5273 val = *bnx2x_sp(bp, wb_data[0]);
5274 if (val == 0xb0)
5275 break;
5276
5277 msleep(10);
5278 count--;
5279 }
5280 if (val != 0xb0) {
5281 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5282 return -3;
5283 }
5284
5285 /* Wait until PRS register shows 2 packets */
5286 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5287 if (val != 2)
5288 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5289
5290 /* Write 1 to parser credits for CFC search request */
5291 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5292
5293 /* Wait until PRS register shows 3 packets */
5294 msleep(10 * factor);
5295 /* Wait until NIG register shows 1 packet of size 0x10 */
5296 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5297 if (val != 3)
5298 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5299
5300 /* clear NIG EOP FIFO */
5301 for (i = 0; i < 11; i++)
5302 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5303 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5304 if (val != 1) {
5305 BNX2X_ERR("clear of NIG failed\n");
5306 return -4;
5307 }
5308
5309 /* Reset and init BRB, PRS, NIG */
5310 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5311 msleep(50);
5312 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5313 msleep(50);
5314 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5315 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5316#ifndef BCM_ISCSI
5317 /* set NIC mode */
5318 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5319#endif
5320
5321 /* Enable inputs of parser neighbor blocks */
5322 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5323 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5324 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5325 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5326
5327 DP(NETIF_MSG_HW, "done\n");
5328
5329 return 0; /* OK */
5330}
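/* Summary of the self-test above: with the parser's CFC search credits
 * forced to 0, NIG loopback debug packets queue up in front of the
 * parser; the NIG and PRS packet counters are checked against the
 * expected counts, credits are restored and BRB/PRS are reset back to
 * a clean state. Each failure mode returns a distinct negative code.
 */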
5331
5332static void enable_blocks_attention(struct bnx2x *bp)
5333{
5334 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5335 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5336 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5337 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5338 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5339 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5340 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5341 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5342 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5343/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5344/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5345 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5346 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5347 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5348/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5349/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5350 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5351 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5352 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5353 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5354/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5355/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5356 if (CHIP_REV_IS_FPGA(bp))
5357 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5358 else
5359 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5360 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5361 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5362 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5363/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5364/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5365 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5366 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5367/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5368 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5369}
5370
34f80b04 5371
5372static void bnx2x_reset_common(struct bnx2x *bp)
5373{
5374 /* reset_common */
5375 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5376 0xd3ffff7f);
5377 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5378}
5379
34f80b04 5380static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5381{
a2fbb9ea 5382 u32 val, i;
a2fbb9ea 5383
34f80b04 5384 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5385
81f75bbf 5386 bnx2x_reset_common(bp);
5387 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5388 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5389
5390 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5391 if (CHIP_IS_E1H(bp))
5392 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5393
5394 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5395 msleep(30);
5396 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5397
5398 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5399 if (CHIP_IS_E1(bp)) {
5400 /* enable HW interrupt from PXP on USDM overflow
5401 bit 16 on INT_MASK_0 */
5402 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5403 }
a2fbb9ea 5404
5405 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5406 bnx2x_init_pxp(bp);
5407
5408#ifdef __BIG_ENDIAN
5409 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5410 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5411 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5412 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5413 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5414 /* make sure this value is 0 */
5415 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5416
5417/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5418 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5419 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5420 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5421 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5422#endif
5423
34f80b04 5424 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5425#ifdef BCM_ISCSI
5426 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5427 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5428 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5429#endif
5430
5431 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5432 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5433
5434 /* let the HW do its magic ... */
5435 msleep(100);
5436 /* finish PXP init */
5437 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5438 if (val != 1) {
5439 BNX2X_ERR("PXP2 CFG failed\n");
5440 return -EBUSY;
5441 }
5442 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5443 if (val != 1) {
5444 BNX2X_ERR("PXP2 RD_INIT failed\n");
5445 return -EBUSY;
5446 }
a2fbb9ea 5447
5448 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5449 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5450
34f80b04 5451 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5452
5453 /* clean the DMAE memory */
5454 bp->dmae_ready = 1;
5455 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5456
5457 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5458 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5459 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5460 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5461
5462 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5463 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5464 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5465 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5466
5467 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5468 /* soft reset pulse */
5469 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5470 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5471
5472#ifdef BCM_ISCSI
34f80b04 5473 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5474#endif
a2fbb9ea 5475
5476 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5477 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5478 if (!CHIP_REV_IS_SLOW(bp)) {
5479 /* enable hw interrupt from doorbell Q */
5480 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5481 }
a2fbb9ea 5482
34f80b04 5483 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5484 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5485 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5486 /* set NIC mode */
5487 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5488 if (CHIP_IS_E1H(bp))
5489 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5490
5491 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5492 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5493 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5494 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5495
5496 if (CHIP_IS_E1H(bp)) {
5497 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5498 STORM_INTMEM_SIZE_E1H/2);
5499 bnx2x_init_fill(bp,
5500 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5501 0, STORM_INTMEM_SIZE_E1H/2);
5502 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5503 STORM_INTMEM_SIZE_E1H/2);
5504 bnx2x_init_fill(bp,
5505 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5506 0, STORM_INTMEM_SIZE_E1H/2);
5507 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5508 STORM_INTMEM_SIZE_E1H/2);
5509 bnx2x_init_fill(bp,
5510 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5511 0, STORM_INTMEM_SIZE_E1H/2);
5512 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5513 STORM_INTMEM_SIZE_E1H/2);
5514 bnx2x_init_fill(bp,
5515 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5516 0, STORM_INTMEM_SIZE_E1H/2);
5517 } else { /* E1 */
5518 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5519 STORM_INTMEM_SIZE_E1);
5520 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5521 STORM_INTMEM_SIZE_E1);
5522 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5523 STORM_INTMEM_SIZE_E1);
5524 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5525 STORM_INTMEM_SIZE_E1);
34f80b04 5526 }
a2fbb9ea 5527
5528 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5529 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5530 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5531 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5532
5533 /* sync semi rtc */
5534 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5535 0x80000000);
5536 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5537 0x80000000);
a2fbb9ea 5538
5539 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5540 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5541 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5542
5543 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5544 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5545 REG_WR(bp, i, 0xc0cac01a);
5546 /* TODO: replace with something meaningful */
5547 }
8d9c5f34 5548 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5549 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5550
5551 if (sizeof(union cdu_context) != 1024)
5552 /* we currently assume that a context is 1024 bytes */
5553 printk(KERN_ALERT PFX "please adjust the size of"
5554 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5555
5556 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5557 val = (4 << 24) + (0 << 12) + 1024;
5558 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5559 if (CHIP_IS_E1(bp)) {
5560 /* !!! fix pxp client credit until excel update */
5561 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5562 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5563 }
a2fbb9ea 5564
5565 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5566 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5567 /* enable context validation interrupt from CFC */
5568 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5569
5570 /* set the thresholds to prevent CFC/CDU race */
5571 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5572
5573 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5574 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5575
5576 /* PXPCS COMMON comes here */
5577 /* Reset PCIE errors for debug */
5578 REG_WR(bp, 0x2814, 0xffffffff);
5579 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5580
5581 /* EMAC0 COMMON comes here */
5582 /* EMAC1 COMMON comes here */
5583 /* DBU COMMON comes here */
5584 /* DBG COMMON comes here */
5585
5586 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5587 if (CHIP_IS_E1H(bp)) {
5588 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5589 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5590 }
5591
5592 if (CHIP_REV_IS_SLOW(bp))
5593 msleep(200);
5594
5595 /* finish CFC init */
5596 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5597 if (val != 1) {
5598 BNX2X_ERR("CFC LL_INIT failed\n");
5599 return -EBUSY;
5600 }
5601 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5602 if (val != 1) {
5603 BNX2X_ERR("CFC AC_INIT failed\n");
5604 return -EBUSY;
5605 }
5606 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5607 if (val != 1) {
5608 BNX2X_ERR("CFC CAM_INIT failed\n");
5609 return -EBUSY;
5610 }
5611 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5612
5613 /* read the NIG statistic
5614 to see if this is our first up since power-up */
5615 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5616 val = *bnx2x_sp(bp, wb_data[0]);
5617
5618 /* do internal memory self test */
5619 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5620 BNX2X_ERR("internal mem self test failed\n");
5621 return -EBUSY;
5622 }
5623
5624 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5625 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5626 /* Fan failure is indicated by SPIO 5 */
5627 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5628 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5629
5630 /* set to active low mode */
5631 val = REG_RD(bp, MISC_REG_SPIO_INT);
5632 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5633 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5634 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5635
5636 /* enable interrupt to signal the IGU */
5637 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5638 val |= (1 << MISC_REGISTERS_SPIO_5);
5639 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5640 break;
f1410647 5641
5642 default:
5643 break;
5644 }
f1410647 5645
5646 /* clear PXP2 attentions */
5647 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5648
34f80b04 5649 enable_blocks_attention(bp);
a2fbb9ea 5650
5651 if (!BP_NOMCP(bp)) {
5652 bnx2x_acquire_phy_lock(bp);
5653 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5654 bnx2x_release_phy_lock(bp);
5655 } else
5656 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5657
5658 return 0;
5659}
a2fbb9ea 5660
5661static int bnx2x_init_port(struct bnx2x *bp)
5662{
5663 int port = BP_PORT(bp);
1c06328c 5664 u32 low, high;
34f80b04 5665 u32 val;
a2fbb9ea 5666
5667 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5668
5669 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5670
5671 /* Port PXP comes here */
5672 /* Port PXP2 comes here */
5673#ifdef BCM_ISCSI
5674 /* Port0 1
5675 * Port1 385 */
5676 i++;
5677 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5678 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5679 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5680 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5681
5682 /* Port0 2
5683 * Port1 386 */
5684 i++;
5685 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5686 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5687 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5688 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5689
5690 /* Port0 3
5691 * Port1 387 */
5692 i++;
5693 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5694 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5695 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5696 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5697#endif
34f80b04 5698 /* Port CMs come here */
5699 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5700 (port ? XCM_PORT1_END : XCM_PORT0_END));
5701
5702 /* Port QM comes here */
5703#ifdef BCM_ISCSI
5704 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5705 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5706
5707 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5708 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5709#endif
5710 /* Port DQ comes here */
5711
5712 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5713 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5714 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5715 /* no pause for emulation and FPGA */
5716 low = 0;
5717 high = 513;
5718 } else {
5719 if (IS_E1HMF(bp))
5720 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5721 else if (bp->dev->mtu > 4096) {
5722 if (bp->flags & ONE_PORT_FLAG)
5723 low = 160;
5724 else {
5725 val = bp->dev->mtu;
5726 /* (24*1024 + val*4)/256 */
5727 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5728 }
5729 } else
5730 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5731 high = low + 56; /* 14*1024/256 */
5732 }
5733 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5734 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
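/* The arithmetic above, spelled out: the BRB pause thresholds are in
 * 256-byte blocks. low = 96 + val/64 rounded up is exactly the quoted
 * (24*1024 + val*4)/256, and high = low + 56 adds 14KB of hysteresis
 * (14*1024/256 blocks) on top of the low watermark.
 */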
5735
5736
ad8d3948 5737 /* Port PRS comes here */
5738 /* Port TSDM comes here */
5739 /* Port CSDM comes here */
5740 /* Port USDM comes here */
5741 /* Port XSDM comes here */
5742 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5743 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5744 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5745 port ? USEM_PORT1_END : USEM_PORT0_END);
5746 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5747 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5748 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5749 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5750 /* Port UPB comes here */
5751 /* Port XPB comes here */
5752
5753 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5754 port ? PBF_PORT1_END : PBF_PORT0_END);
5755
5756 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5757 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5758
5759 /* update threshold */
34f80b04 5760 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5761 /* update init credit */
34f80b04 5762 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5763
5764 /* probe changes */
34f80b04 5765 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5766 msleep(5);
34f80b04 5767 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
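/* Note: the PBF values above appear to be in 16-byte units -- 9040/16
 * covers a 9000-byte MTU plus overhead, matching the "without PAUSE
 * mtu 9000" comment -- while the extra "+ 553 - 22" in the init credit
 * is an undocumented fixed adjustment; treat that reading as an
 * assumption, not a datasheet fact.
 */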
5768
5769#ifdef BCM_ISCSI
5770 /* tell the searcher where the T2 table is */
5771 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5772
5773 wb_write[0] = U64_LO(bp->t2_mapping);
5774 wb_write[1] = U64_HI(bp->t2_mapping);
5775 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5776 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5777 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5778 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5779
5780 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5781 /* Port SRCH comes here */
5782#endif
5783 /* Port CDU comes here */
5784 /* Port CFC comes here */
5785
5786 if (CHIP_IS_E1(bp)) {
5787 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5788 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5789 }
5790 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5791 port ? HC_PORT1_END : HC_PORT0_END);
5792
5793 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5794 MISC_AEU_PORT0_START,
5795 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5796 /* init aeu_mask_attn_func_0/1:
5797 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5798 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5799 * bits 4-7 are used for "per vn group attention" */
5800 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5801 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5802
5803 /* Port PXPCS comes here */
5804 /* Port EMAC0 comes here */
5805 /* Port EMAC1 comes here */
5806 /* Port DBU comes here */
5807 /* Port DBG comes here */
5808 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5809 port ? NIG_PORT1_END : NIG_PORT0_END);
5810
5811 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5812
5813 if (CHIP_IS_E1H(bp)) {
5814 /* 0x2 disable e1hov, 0x1 enable */
5815 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5816 (IS_E1HMF(bp) ? 0x1 : 0x2));
5817
5818 /* support pause requests from USDM, TSDM and BRB */
5819 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5820
5821 {
5822 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5823 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5824 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5825 }
5826 }
5827
5828 /* Port MCP comes here */
5829 /* Port DMAE comes here */
5830
5831 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5832 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5833 /* add SPIO 5 to group 0 */
5834 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5835 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5836 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5837 break;
5838
5839 default:
5840 break;
5841 }
5842
c18487ee 5843 bnx2x__link_reset(bp);
a2fbb9ea 5844
5845 return 0;
5846}
5847
5848#define ILT_PER_FUNC (768/2)
5849#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5850/* the phys address is shifted right 12 bits and has a
5851 valid bit (1) added as the 53rd bit;
5852 then, since this is a wide register(TM),
5853 we split it into two 32-bit writes
5854 */
5855#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5856#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5857#define PXP_ONE_ILT(x) (((x) << 10) | x)
5858#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
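/* Worked example of the packing above (illustrative address only): for
 * addr = 0x0012345678901000,
 * ONCHIP_ADDR1(addr) = (addr >> 12) & 0xFFFFFFFF = 0x45678901
 * ONCHIP_ADDR2(addr) = (1 << 20) | (addr >> 44) = 0x00100123
 * i.e. the low and high halves of the 52-bit page number, with the
 * valid bit landing at bit 20 of the high word (the 53rd entry bit).
 */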
5859
5860#define CNIC_ILT_LINES 0
5861
5862static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5863{
5864 int reg;
5865
5866 if (CHIP_IS_E1H(bp))
5867 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5868 else /* E1 */
5869 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5870
5871 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5872}
5873
5874static int bnx2x_init_func(struct bnx2x *bp)
5875{
5876 int port = BP_PORT(bp);
5877 int func = BP_FUNC(bp);
8badd27a 5878 u32 addr, val;
5879 int i;
5880
5881 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5882
5883 /* set MSI reconfigure capability */
5884 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5885 val = REG_RD(bp, addr);
5886 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5887 REG_WR(bp, addr, val);
5888
5889 i = FUNC_ILT_BASE(func);
5890
5891 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5892 if (CHIP_IS_E1H(bp)) {
5893 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5894 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5895 } else /* E1 */
5896 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5897 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5898
5899
5900 if (CHIP_IS_E1H(bp)) {
5901 for (i = 0; i < 9; i++)
5902 bnx2x_init_block(bp,
5903 cm_start[func][i], cm_end[func][i]);
5904
5905 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5906 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5907 }
5908
5909 /* HC init per function */
5910 if (CHIP_IS_E1H(bp)) {
5911 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5912
5913 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5914 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5915 }
5916 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5917
c14423fe 5918 /* Reset PCIE errors for debug */
5919 REG_WR(bp, 0x2114, 0xffffffff);
5920 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5921
5922 return 0;
5923}
5924
5925static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5926{
5927 int i, rc = 0;
a2fbb9ea 5928
5929 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5930 BP_FUNC(bp), load_code);
a2fbb9ea 5931
5932 bp->dmae_ready = 0;
5933 mutex_init(&bp->dmae_mutex);
5934 bnx2x_gunzip_init(bp);
a2fbb9ea 5935
5936 switch (load_code) {
5937 case FW_MSG_CODE_DRV_LOAD_COMMON:
5938 rc = bnx2x_init_common(bp);
5939 if (rc)
5940 goto init_hw_err;
5941 /* no break */
5942
5943 case FW_MSG_CODE_DRV_LOAD_PORT:
5944 bp->dmae_ready = 1;
5945 rc = bnx2x_init_port(bp);
5946 if (rc)
5947 goto init_hw_err;
5948 /* no break */
5949
5950 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5951 bp->dmae_ready = 1;
5952 rc = bnx2x_init_func(bp);
5953 if (rc)
5954 goto init_hw_err;
5955 break;
5956
5957 default:
5958 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5959 break;
5960 }
5961
5962 if (!BP_NOMCP(bp)) {
5963 int func = BP_FUNC(bp);
5964
5965 bp->fw_drv_pulse_wr_seq =
34f80b04 5966 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5967 DRV_PULSE_SEQ_MASK);
5968 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5969 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5970 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5971 } else
5972 bp->func_stx = 0;
a2fbb9ea 5973
5974 /* this needs to be done before gunzip end */
5975 bnx2x_zero_def_sb(bp);
5976 for_each_queue(bp, i)
5977 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5978
5979init_hw_err:
5980 bnx2x_gunzip_end(bp);
5981
5982 return rc;
5983}
5984
c14423fe 5985/* send the MCP a request, block until there is a reply */
5986static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5987{
34f80b04 5988 int func = BP_FUNC(bp);
5989 u32 seq = ++bp->fw_seq;
5990 u32 rc = 0;
5991 u32 cnt = 1;
5992 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5993
34f80b04 5994 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5995 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5996
5997 do {
5998 /* let the FW do its magic ... */
5999 msleep(delay);
a2fbb9ea 6000
19680c48 6001 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6002
6003 /* Give the FW up to 2 seconds (200*10ms) */
6004 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6005
6006 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6007 cnt*delay, rc, seq);
6008
6009 /* is this a reply to our command? */
6010 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6011 rc &= FW_MSG_CODE_MASK;
f1410647 6012
6013 } else {
6014 /* FW BUG! */
6015 BNX2X_ERR("FW failed to respond!\n");
6016 bnx2x_fw_dump(bp);
6017 rc = 0;
6018 }
f1410647 6019
6020 return rc;
6021}
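/* The MCP handshake above in short: the driver posts (command | seq)
 * to drv_mb_header and polls fw_mb_header until the FW echoes the same
 * sequence number (up to ~2s); the FW_MSG_CODE_MASK bits of the echoed
 * word are the response code, and a return of 0 means the MCP never
 * answered.
 */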
6022
6023static void bnx2x_free_mem(struct bnx2x *bp)
6024{
6025
6026#define BNX2X_PCI_FREE(x, y, size) \
6027 do { \
6028 if (x) { \
6029 pci_free_consistent(bp->pdev, size, x, y); \
6030 x = NULL; \
6031 y = 0; \
6032 } \
6033 } while (0)
6034
6035#define BNX2X_FREE(x) \
6036 do { \
6037 if (x) { \
6038 vfree(x); \
6039 x = NULL; \
6040 } \
6041 } while (0)
6042
6043 int i;
6044
6045 /* fastpath */
555f6c78 6046 /* Common */
6047 for_each_queue(bp, i) {
6048
555f6c78 6049 /* status blocks */
6050 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6051 bnx2x_fp(bp, i, status_blk_mapping),
6052 sizeof(struct host_status_block) +
6053 sizeof(struct eth_tx_db_data));
6054 }
6055 /* Rx */
6056 for_each_rx_queue(bp, i) {
a2fbb9ea 6057
555f6c78 6058 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6059 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6060 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6061 bnx2x_fp(bp, i, rx_desc_mapping),
6062 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6063
6064 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6065 bnx2x_fp(bp, i, rx_comp_mapping),
6066 sizeof(struct eth_fast_path_rx_cqe) *
6067 NUM_RCQ_BD);
a2fbb9ea 6068
7a9b2557 6069 /* SGE ring */
32626230 6070 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6071 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6072 bnx2x_fp(bp, i, rx_sge_mapping),
6073 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6074 }
6075 /* Tx */
6076 for_each_tx_queue(bp, i) {
6077
6078 /* fastpath tx rings: tx_buf tx_desc */
6079 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6080 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6081 bnx2x_fp(bp, i, tx_desc_mapping),
6082 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6083 }
6084 /* end of fastpath */
6085
6086 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6087 sizeof(struct host_def_status_block));
6088
6089 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6090 sizeof(struct bnx2x_slowpath));
6091
6092#ifdef BCM_ISCSI
6093 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6094 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6095 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6096 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6097#endif
7a9b2557 6098 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6099
6100#undef BNX2X_PCI_FREE
6101#undef BNX2X_FREE
6102}
6103
6104static int bnx2x_alloc_mem(struct bnx2x *bp)
6105{
6106
6107#define BNX2X_PCI_ALLOC(x, y, size) \
6108 do { \
6109 x = pci_alloc_consistent(bp->pdev, size, y); \
6110 if (x == NULL) \
6111 goto alloc_mem_err; \
6112 memset(x, 0, size); \
6113 } while (0)
6114
6115#define BNX2X_ALLOC(x, size) \
6116 do { \
6117 x = vmalloc(size); \
6118 if (x == NULL) \
6119 goto alloc_mem_err; \
6120 memset(x, 0, size); \
6121 } while (0)
6122
6123 int i;
6124
6125 /* fastpath */
555f6c78 6126 /* Common */
6127 for_each_queue(bp, i) {
6128 bnx2x_fp(bp, i, bp) = bp;
6129
555f6c78 6130 /* status blocks */
6131 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6132 &bnx2x_fp(bp, i, status_blk_mapping),
6133 sizeof(struct host_status_block) +
6134 sizeof(struct eth_tx_db_data));
6135 }
6136 /* Rx */
6137 for_each_rx_queue(bp, i) {
a2fbb9ea 6138
555f6c78 6139 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6140 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6141 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6142 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6143 &bnx2x_fp(bp, i, rx_desc_mapping),
6144 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6145
6146 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6147 &bnx2x_fp(bp, i, rx_comp_mapping),
6148 sizeof(struct eth_fast_path_rx_cqe) *
6149 NUM_RCQ_BD);
6150
6151 /* SGE ring */
6152 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6153 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6154 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6155 &bnx2x_fp(bp, i, rx_sge_mapping),
6156 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6157 }
6158 /* Tx */
6159 for_each_tx_queue(bp, i) {
6160
6161 bnx2x_fp(bp, i, hw_tx_prods) =
6162 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6163
6164 bnx2x_fp(bp, i, tx_prods_mapping) =
6165 bnx2x_fp(bp, i, status_blk_mapping) +
6166 sizeof(struct host_status_block);
6167
6168 /* fastpath tx rings: tx_buf tx_desc */
6169 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6170 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6171 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6172 &bnx2x_fp(bp, i, tx_desc_mapping),
6173 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6174 }
6175 /* end of fastpath */
6176
6177 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6178 sizeof(struct host_def_status_block));
6179
6180 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6181 sizeof(struct bnx2x_slowpath));
6182
6183#ifdef BCM_ISCSI
6184 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6185
6186 /* Initialize T1 */
6187 for (i = 0; i < 64*1024; i += 64) {
6188 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6189 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6190 }
6191
6192 /* allocate searcher T2 table
6193 we allocate 1/4 of alloc num for T2
6194 (which is not entered into the ILT) */
6195 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6196
6197 /* Initialize T2 */
6198 for (i = 0; i < 16*1024; i += 64)
6199 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6200
c14423fe 6201 /* now fixup the last line in the block to point to the next block */
6202 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6203
6204 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6205 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6206
6207 /* QM queues (128*MAX_CONN) */
6208 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6209#endif
6210
6211 /* Slow path ring */
6212 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6213
6214 return 0;
6215
6216alloc_mem_err:
6217 bnx2x_free_mem(bp);
6218 return -ENOMEM;
6219
6220#undef BNX2X_PCI_ALLOC
6221#undef BNX2X_ALLOC
6222}
6223
6224static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6225{
6226 int i;
6227
555f6c78 6228 for_each_tx_queue(bp, i) {
6229 struct bnx2x_fastpath *fp = &bp->fp[i];
6230
6231 u16 bd_cons = fp->tx_bd_cons;
6232 u16 sw_prod = fp->tx_pkt_prod;
6233 u16 sw_cons = fp->tx_pkt_cons;
6234
6235 while (sw_cons != sw_prod) {
6236 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6237 sw_cons++;
6238 }
6239 }
6240}
6241
6242static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6243{
6244 int i, j;
6245
555f6c78 6246 for_each_rx_queue(bp, j) {
6247 struct bnx2x_fastpath *fp = &bp->fp[j];
6248
6249 for (i = 0; i < NUM_RX_BD; i++) {
6250 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6251 struct sk_buff *skb = rx_buf->skb;
6252
6253 if (skb == NULL)
6254 continue;
6255
6256 pci_unmap_single(bp->pdev,
6257 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6258 bp->rx_buf_size,
6259 PCI_DMA_FROMDEVICE);
6260
6261 rx_buf->skb = NULL;
6262 dev_kfree_skb(skb);
6263 }
7a9b2557 6264 if (!fp->disable_tpa)
6265 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6266 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6267 ETH_MAX_AGGREGATION_QUEUES_E1H);
6268 }
6269}
6270
6271static void bnx2x_free_skbs(struct bnx2x *bp)
6272{
6273 bnx2x_free_tx_skbs(bp);
6274 bnx2x_free_rx_skbs(bp);
6275}
6276
6277static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6278{
34f80b04 6279 int i, offset = 1;
6280
6281 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6282 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6283 bp->msix_table[0].vector);
6284
6285 for_each_queue(bp, i) {
c14423fe 6286 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6287 "state %x\n", i, bp->msix_table[i + offset].vector,
6288 bnx2x_fp(bp, i, state));
6289
34f80b04 6290 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6291 }
6292}
6293
6294static void bnx2x_free_irq(struct bnx2x *bp)
6295{
a2fbb9ea 6296 if (bp->flags & USING_MSIX_FLAG) {
6297 bnx2x_free_msix_irqs(bp);
6298 pci_disable_msix(bp->pdev);
6299 bp->flags &= ~USING_MSIX_FLAG;
6300
6301 } else if (bp->flags & USING_MSI_FLAG) {
6302 free_irq(bp->pdev->irq, bp->dev);
6303 pci_disable_msi(bp->pdev);
6304 bp->flags &= ~USING_MSI_FLAG;
6305
6306 } else
6307 free_irq(bp->pdev->irq, bp->dev);
6308}
6309
6310static int bnx2x_enable_msix(struct bnx2x *bp)
6311{
6312 int i, rc, offset = 1;
6313 int igu_vec = 0;
a2fbb9ea 6314
6315 bp->msix_table[0].entry = igu_vec;
6316 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6317
34f80b04 6318 for_each_queue(bp, i) {
8badd27a 6319 igu_vec = BP_L_ID(bp) + offset + i;
6320 bp->msix_table[i + offset].entry = igu_vec;
6321 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6322 "(fastpath #%u)\n", i + offset, igu_vec, i);
6323 }
6324
34f80b04 6325 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6326 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6327 if (rc) {
6328 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6329 return rc;
34f80b04 6330 }
8badd27a 6331
6332 bp->flags |= USING_MSIX_FLAG;
6333
6334 return 0;
6335}
6336
6337static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6338{
34f80b04 6339 int i, rc, offset = 1;
a2fbb9ea 6340
6341 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6342 bp->dev->name, bp->dev);
6343 if (rc) {
6344 BNX2X_ERR("request sp irq failed\n");
6345 return -EBUSY;
6346 }
6347
6348 for_each_queue(bp, i) {
6349 struct bnx2x_fastpath *fp = &bp->fp[i];
6350
6351 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6352 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6353 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6354 if (rc) {
555f6c78 6355 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6356 bnx2x_free_msix_irqs(bp);
6357 return -EBUSY;
6358 }
6359
555f6c78 6360 fp->state = BNX2X_FP_STATE_IRQ;
6361 }
6362
6363 i = BNX2X_NUM_QUEUES(bp);
6364 if (is_multi(bp))
6365 printk(KERN_INFO PFX
6366 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6367 bp->dev->name, bp->msix_table[0].vector,
6368 bp->msix_table[offset].vector,
6369 bp->msix_table[offset + i - 1].vector);
6370 else
6371 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6372 bp->dev->name, bp->msix_table[0].vector,
6373 bp->msix_table[offset + i - 1].vector);
6374
a2fbb9ea 6375 return 0;
6376}
6377
6378static int bnx2x_enable_msi(struct bnx2x *bp)
6379{
6380 int rc;
6381
6382 rc = pci_enable_msi(bp->pdev);
6383 if (rc) {
6384 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6385 return -1;
6386 }
6387 bp->flags |= USING_MSI_FLAG;
6388
6389 return 0;
6390}
6391
6392static int bnx2x_req_irq(struct bnx2x *bp)
6393{
8badd27a 6394 unsigned long flags;
34f80b04 6395 int rc;
a2fbb9ea 6396
6397 if (bp->flags & USING_MSI_FLAG)
6398 flags = 0;
6399 else
6400 flags = IRQF_SHARED;
6401
6402 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6403 bp->dev->name, bp->dev);
6404 if (!rc)
6405 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6406
6407 return rc;
6408}
6409
6410static void bnx2x_napi_enable(struct bnx2x *bp)
6411{
6412 int i;
6413
555f6c78 6414 for_each_rx_queue(bp, i)
6415 napi_enable(&bnx2x_fp(bp, i, napi));
6416}
6417
6418static void bnx2x_napi_disable(struct bnx2x *bp)
6419{
6420 int i;
6421
555f6c78 6422 for_each_rx_queue(bp, i)
6423 napi_disable(&bnx2x_fp(bp, i, napi));
6424}
6425
6426static void bnx2x_netif_start(struct bnx2x *bp)
6427{
6428 if (atomic_dec_and_test(&bp->intr_sem)) {
6429 if (netif_running(bp->dev)) {
6430 bnx2x_napi_enable(bp);
6431 bnx2x_int_enable(bp);
6432 if (bp->state == BNX2X_STATE_OPEN)
6433 netif_tx_wake_all_queues(bp->dev);
6434 }
6435 }
6436}
6437
f8ef6e44 6438static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6439{
f8ef6e44 6440 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6441 bnx2x_napi_disable(bp);
65abd74d 6442 if (netif_running(bp->dev)) {
6443 netif_tx_disable(bp->dev);
6444 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6445 }
6446}
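/* Note: bnx2x_netif_stop() and bnx2x_netif_start() pair up through
 * bp->intr_sem -- stop raises it inside bnx2x_int_disable_sync() and
 * start re-enables NAPI/interrupts only when atomic_dec_and_test()
 * brings it back to zero, so nested stops need matching starts.
 */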
6447
6448/*
6449 * Init service functions
6450 */
6451
3101c2bc 6452static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6453{
6454 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6455 int port = BP_PORT(bp);
6456
6457 /* CAM allocation
6458 * unicasts 0-31:port0 32-63:port1
6459 * multicast 64-127:port0 128-191:port1
6460 */
8d9c5f34 6461 config->hdr.length = 2;
af246401 6462 config->hdr.offset = port ? 32 : 0;
34f80b04 6463 config->hdr.client_id = BP_CL_ID(bp);
6464 config->hdr.reserved1 = 0;
6465
6466 /* primary MAC */
6467 config->config_table[0].cam_entry.msb_mac_addr =
6468 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6469 config->config_table[0].cam_entry.middle_mac_addr =
6470 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6471 config->config_table[0].cam_entry.lsb_mac_addr =
6472 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6473 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6474 if (set)
6475 config->config_table[0].target_table_entry.flags = 0;
6476 else
6477 CAM_INVALIDATE(config->config_table[0]);
6478 config->config_table[0].target_table_entry.client_id = 0;
6479 config->config_table[0].target_table_entry.vlan_id = 0;
6480
6481 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6482 (set ? "setting" : "clearing"),
6483 config->config_table[0].cam_entry.msb_mac_addr,
6484 config->config_table[0].cam_entry.middle_mac_addr,
6485 config->config_table[0].cam_entry.lsb_mac_addr);
6486
6487 /* broadcast */
6488 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6489 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6490 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6491 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6492 if (set)
6493 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6494 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6495 else
6496 CAM_INVALIDATE(config->config_table[1]);
6497 config->config_table[1].target_table_entry.client_id = 0;
6498 config->config_table[1].target_table_entry.vlan_id = 0;
6499
6500 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6501 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6502 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6503}
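/* Note: the same ramrod serves both directions -- with set != 0 the
 * entries above install the primary and broadcast MACs into the CAM,
 * while with set == 0 CAM_INVALIDATE() marks the same entries invalid
 * before the ramrod is posted.
 */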
6504
3101c2bc 6505static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6506{
6507 struct mac_configuration_cmd_e1h *config =
6508 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6509
3101c2bc 6510 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6511 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6512 return;
6513 }
6514
6515 /* CAM allocation for E1H
6516 * unicasts: by func number
6517 * multicast: 20+FUNC*20, 20 each
6518 */
8d9c5f34 6519 config->hdr.length = 1;
6520 config->hdr.offset = BP_FUNC(bp);
6521 config->hdr.client_id = BP_CL_ID(bp);
6522 config->hdr.reserved1 = 0;
6523
6524 /* primary MAC */
6525 config->config_table[0].msb_mac_addr =
6526 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6527 config->config_table[0].middle_mac_addr =
6528 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6529 config->config_table[0].lsb_mac_addr =
6530 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6531 config->config_table[0].client_id = BP_L_ID(bp);
6532 config->config_table[0].vlan_id = 0;
6533 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6534 if (set)
6535 config->config_table[0].flags = BP_PORT(bp);
6536 else
6537 config->config_table[0].flags =
6538 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6539
6540 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6541 (set ? "setting" : "clearing"),
6542 config->config_table[0].msb_mac_addr,
6543 config->config_table[0].middle_mac_addr,
6544 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6545
6546 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6547 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6548 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6549}
6550
6551static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6552 int *state_p, int poll)
6553{
6554 /* can take a while if any port is running */
34f80b04 6555 int cnt = 500;
a2fbb9ea 6556
6557 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6558 poll ? "polling" : "waiting", state, idx);
6559
6560 might_sleep();
34f80b04 6561 while (cnt--) {
6562 if (poll) {
6563 bnx2x_rx_int(bp->fp, 10);
6564 /* if index is different from 0
6565 * the reply for some commands will
3101c2bc 6566 * be on the non default queue
6567 */
6568 if (idx)
6569 bnx2x_rx_int(&bp->fp[idx], 10);
6570 }
a2fbb9ea 6571
3101c2bc 6572 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6573 if (*state_p == state)
6574 return 0;
6575
a2fbb9ea 6576 msleep(1);
6577 }
6578
a2fbb9ea 6579 /* timeout! */
6580 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6581 poll ? "polling" : "waiting", state, idx);
6582#ifdef BNX2X_STOP_ON_ERROR
6583 bnx2x_panic();
6584#endif
a2fbb9ea 6585
49d66772 6586 return -EBUSY;
6587}
6588
6589static int bnx2x_setup_leading(struct bnx2x *bp)
6590{
34f80b04 6591 int rc;
a2fbb9ea 6592
c14423fe 6593 /* reset IGU state */
34f80b04 6594 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea
ET
6595
6596 /* SETUP ramrod */
6597 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6598
6599 /* Wait for completion */
6600 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6601
34f80b04 6602 return rc;
6603}
6604
6605static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6606{
6607 struct bnx2x_fastpath *fp = &bp->fp[index];
6608
a2fbb9ea 6609 /* reset IGU state */
555f6c78 6610 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6611
228241eb 6612 /* SETUP ramrod */
6613 fp->state = BNX2X_FP_STATE_OPENING;
6614 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6615 fp->cl_id, 0);
6616
6617 /* Wait for completion */
6618 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6619 &(fp->state), 0);
6620}
6621
a2fbb9ea 6622static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6623
8badd27a 6624static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6625{
555f6c78 6626 int num_queues;
a2fbb9ea 6627
6628 switch (int_mode) {
6629 case INT_MODE_INTx:
6630 case INT_MODE_MSI:
6631 num_queues = 1;
6632 bp->num_rx_queues = num_queues;
6633 bp->num_tx_queues = num_queues;
6634 DP(NETIF_MSG_IFUP,
6635 "set number of queues to %d\n", num_queues);
6636 break;
6637
6638 case INT_MODE_MSIX:
6639 default:
6640 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6641 num_queues = min_t(u32, num_online_cpus(),
6642 BNX2X_MAX_QUEUES(bp));
34f80b04 6643 else
6644 num_queues = 1;
6645 bp->num_rx_queues = num_queues;
6646 bp->num_tx_queues = num_queues;
6647 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6648 " number of tx queues to %d\n",
6649 bp->num_rx_queues, bp->num_tx_queues);
6650 /* if we can't use MSI-X we only need one fp,
6651 * so try to enable MSI-X with the requested number of fp's
6652 * and fall back to MSI or legacy INTx with one fp
6653 */
8badd27a 6654 if (bnx2x_enable_msix(bp)) {
34f80b04 6655 /* failed to enable MSI-X */
6656 num_queues = 1;
6657 bp->num_rx_queues = num_queues;
6658 bp->num_tx_queues = num_queues;
6659 if (bp->multi_mode)
6660 BNX2X_ERR("Multi requested but failed to "
6661 "enable MSI-X set number of "
6662 "queues to %d\n", num_queues);
a2fbb9ea 6663 }
8badd27a 6664 break;
a2fbb9ea 6665 }
555f6c78 6666 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6667}
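/* Editor's note: a worked example of the queue sizing above, with
 * hypothetical numbers -- in MSI-X mode with RSS enabled and, say,
 * 16 online CPUs but BNX2X_MAX_QUEUES(bp) == 8, num_queues becomes
 * min(16, 8) == 8; in INT#x/MSI mode, or if bnx2x_enable_msix()
 * fails, everything collapses back to a single rx/tx queue pair.
 */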
6668
6669static void bnx2x_set_rx_mode(struct net_device *dev);
6670
6671/* must be called with rtnl_lock */
6672static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6673{
6674 u32 load_code;
6675 int i, rc = 0;
6676#ifdef BNX2X_STOP_ON_ERROR
6677 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6678 if (unlikely(bp->panic))
6679 return -EPERM;
6680#endif
6681
6682 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6683
6684 bnx2x_set_int_mode(bp);
c14423fe 6685
a2fbb9ea
ET
6686 if (bnx2x_alloc_mem(bp))
6687 return -ENOMEM;
6688
555f6c78 6689 for_each_rx_queue(bp, i)
7a9b2557
VZ
6690 bnx2x_fp(bp, i, disable_tpa) =
6691 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6692
555f6c78 6693 for_each_rx_queue(bp, i)
2dfe0e1f
EG
6694 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6695 bnx2x_poll, 128);
6696
6697#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6698 for_each_rx_queue(bp, i) {
2dfe0e1f
EG
6699 struct bnx2x_fastpath *fp = &bp->fp[i];
6700
6701 fp->poll_no_work = 0;
6702 fp->poll_calls = 0;
6703 fp->poll_max_calls = 0;
6704 fp->poll_complete = 0;
6705 fp->poll_exit = 0;
6706 }
6707#endif
6708 bnx2x_napi_enable(bp);
6709
34f80b04
EG
6710 if (bp->flags & USING_MSIX_FLAG) {
6711 rc = bnx2x_req_msix_irqs(bp);
6712 if (rc) {
6713 pci_disable_msix(bp->pdev);
2dfe0e1f 6714 goto load_error1;
34f80b04
EG
6715 }
6716 } else {
8badd27a
EG
6717 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6718 bnx2x_enable_msi(bp);
34f80b04
EG
6719 bnx2x_ack_int(bp);
6720 rc = bnx2x_req_irq(bp);
6721 if (rc) {
2dfe0e1f 6722 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a
EG
6723 if (bp->flags & USING_MSI_FLAG)
6724 pci_disable_msi(bp->pdev);
2dfe0e1f 6725 goto load_error1;
a2fbb9ea 6726 }
8badd27a
EG
6727 if (bp->flags & USING_MSI_FLAG) {
6728 bp->dev->irq = bp->pdev->irq;
6729 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6730 bp->dev->name, bp->pdev->irq);
6731 }
a2fbb9ea
ET
6732 }
6733
2dfe0e1f
EG
6734 /* Send LOAD_REQUEST command to the MCP.
6735 Returns the type of LOAD command:
6736 if this is the first port to be initialized,
6737 the common blocks should be initialized as well; otherwise not
6738 */
6739 if (!BP_NOMCP(bp)) {
6740 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6741 if (!load_code) {
6742 BNX2X_ERR("MCP response failure, aborting\n");
6743 rc = -EBUSY;
6744 goto load_error2;
6745 }
6746 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6747 rc = -EBUSY; /* other port in diagnostic mode */
6748 goto load_error2;
6749 }
6750
6751 } else {
6752 int port = BP_PORT(bp);
6753
6754 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6755 load_count[0], load_count[1], load_count[2]);
6756 load_count[0]++;
6757 load_count[1 + port]++;
6758 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6759 load_count[0], load_count[1], load_count[2]);
6760 if (load_count[0] == 1)
6761 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6762 else if (load_count[1 + port] == 1)
6763 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6764 else
6765 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6766 }
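/* Editor's note: NO-MCP bookkeeping recap -- load_count[0] counts all
 * functions, load_count[1 + port] counts functions per port.  E.g.
 * (hypothetical) the first load on port 0 sees counts (1, 1, 0) and
 * gets LOAD_COMMON; a following load on port 1 sees (2, 1, 1) and gets
 * LOAD_PORT; any later load on an already-initialized port gets
 * LOAD_FUNCTION.
 */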
6767
6768 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6769 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6770 bp->port.pmf = 1;
6771 else
6772 bp->port.pmf = 0;
6773 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6774
a2fbb9ea 6775 /* Initialize HW */
34f80b04
EG
6776 rc = bnx2x_init_hw(bp, load_code);
6777 if (rc) {
a2fbb9ea 6778 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6779 goto load_error2;
a2fbb9ea
ET
6780 }
6781
a2fbb9ea 6782 /* Setup NIC internals and enable interrupts */
471de716 6783 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6784
6785 /* Send LOAD_DONE command to MCP */
34f80b04 6786 if (!BP_NOMCP(bp)) {
228241eb
ET
6787 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6788 if (!load_code) {
da5a662a 6789 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6790 rc = -EBUSY;
2dfe0e1f 6791 goto load_error3;
a2fbb9ea
ET
6792 }
6793 }
6794
6795 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6796
34f80b04
EG
6797 rc = bnx2x_setup_leading(bp);
6798 if (rc) {
da5a662a 6799 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6800 goto load_error3;
34f80b04 6801 }
a2fbb9ea 6802
34f80b04
EG
6803 if (CHIP_IS_E1H(bp))
6804 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6805 BNX2X_ERR("!!! mf_cfg function disabled\n");
6806 bp->state = BNX2X_STATE_DISABLED;
6807 }
a2fbb9ea 6808
34f80b04
EG
6809 if (bp->state == BNX2X_STATE_OPEN)
6810 for_each_nondefault_queue(bp, i) {
6811 rc = bnx2x_setup_multi(bp, i);
6812 if (rc)
2dfe0e1f 6813 goto load_error3;
34f80b04 6814 }
a2fbb9ea 6815
34f80b04 6816 if (CHIP_IS_E1(bp))
3101c2bc 6817 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6818 else
3101c2bc 6819 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6820
6821 if (bp->port.pmf)
6822 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6823
6824 /* Start fast path */
34f80b04
EG
6825 switch (load_mode) {
6826 case LOAD_NORMAL:
6827 /* Tx queues should only be re-enabled */
555f6c78 6828 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6829 /* Initialize the receive filter. */
34f80b04
EG
6830 bnx2x_set_rx_mode(bp->dev);
6831 break;
6832
6833 case LOAD_OPEN:
555f6c78 6834 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6835 /* Initialize the receive filter. */
34f80b04 6836 bnx2x_set_rx_mode(bp->dev);
34f80b04 6837 break;
a2fbb9ea 6838
34f80b04 6839 case LOAD_DIAG:
2dfe0e1f 6840 /* Initialize the receive filter. */
a2fbb9ea 6841 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6842 bp->state = BNX2X_STATE_DIAG;
6843 break;
6844
6845 default:
6846 break;
a2fbb9ea
ET
6847 }
6848
34f80b04
EG
6849 if (!bp->port.pmf)
6850 bnx2x__link_status_update(bp);
6851
a2fbb9ea
ET
6852 /* start the timer */
6853 mod_timer(&bp->timer, jiffies + bp->current_interval);
6854
34f80b04 6855
a2fbb9ea
ET
6856 return 0;
6857
2dfe0e1f
EG
6858load_error3:
6859 bnx2x_int_disable_sync(bp, 1);
6860 if (!BP_NOMCP(bp)) {
6861 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6862 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6863 }
6864 bp->port.pmf = 0;
7a9b2557
VZ
6865 /* Free SKBs, SGEs, TPA pool and driver internals */
6866 bnx2x_free_skbs(bp);
555f6c78 6867 for_each_rx_queue(bp, i)
3196a88a 6868 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6869load_error2:
d1014634
YG
6870 /* Release IRQs */
6871 bnx2x_free_irq(bp);
2dfe0e1f
EG
6872load_error1:
6873 bnx2x_napi_disable(bp);
555f6c78 6874 for_each_rx_queue(bp, i)
7cde1c8b 6875 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6876 bnx2x_free_mem(bp);
6877
6878 /* TBD we really need to reset the chip
6879 if we want to recover from this */
34f80b04 6880 return rc;
a2fbb9ea
ET
6881}
6882
6883static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6884{
555f6c78 6885 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
6886 int rc;
6887
c14423fe 6888 /* halt the connection */
555f6c78
EG
6889 fp->state = BNX2X_FP_STATE_HALTING;
6890 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 6891
34f80b04 6892 /* Wait for completion */
a2fbb9ea 6893 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 6894 &(fp->state), 1);
c14423fe 6895 if (rc) /* timeout */
a2fbb9ea
ET
6896 return rc;
6897
6898 /* delete cfc entry */
6899 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6900
34f80b04
EG
6901 /* Wait for completion */
6902 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 6903 &(fp->state), 1);
34f80b04 6904 return rc;
a2fbb9ea
ET
6905}
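/* Editor's note: teardown of a non-default queue is thus a two-step
 * handshake -- HALT stops the client and is acked through the fastpath
 * state, then CFC_DEL releases the connection context; each step is
 * waited on synchronously via bnx2x_wait_ramrod() above.
 */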
6906
da5a662a 6907static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6908{
49d66772 6909 u16 dsb_sp_prod_idx;
c14423fe 6910 /* if the other port is handling traffic,
a2fbb9ea 6911 this can take a lot of time */
34f80b04
EG
6912 int cnt = 500;
6913 int rc;
a2fbb9ea
ET
6914
6915 might_sleep();
6916
6917 /* Send HALT ramrod */
6918 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6919 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6920
34f80b04
EG
6921 /* Wait for completion */
6922 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6923 &(bp->fp[0].state), 1);
6924 if (rc) /* timeout */
da5a662a 6925 return rc;
a2fbb9ea 6926
49d66772 6927 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6928
228241eb 6929 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6930 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6931
49d66772 6932 /* Wait for the completion to arrive on the default status block;
a2fbb9ea
ET
6933 we are going to reset the chip anyway,
6934 so there is not much to do if this times out
6935 */
34f80b04 6936 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6937 if (!cnt) {
6938 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6939 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6940 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6941#ifdef BNX2X_STOP_ON_ERROR
6942 bnx2x_panic();
da5a662a
VZ
6943#else
6944 rc = -EBUSY;
34f80b04
EG
6945#endif
6946 break;
6947 }
6948 cnt--;
da5a662a 6949 msleep(1);
5650d9d4 6950 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
6951 }
6952 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6953 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6954
6955 return rc;
a2fbb9ea
ET
6956}
6957
34f80b04
EG
6958static void bnx2x_reset_func(struct bnx2x *bp)
6959{
6960 int port = BP_PORT(bp);
6961 int func = BP_FUNC(bp);
6962 int base, i;
6963
6964 /* Configure IGU */
6965 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6966 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6967
34f80b04
EG
6968 /* Clear ILT */
6969 base = FUNC_ILT_BASE(func);
6970 for (i = base; i < base + ILT_PER_FUNC; i++)
6971 bnx2x_ilt_wr(bp, i, 0);
6972}
6973
6974static void bnx2x_reset_port(struct bnx2x *bp)
6975{
6976 int port = BP_PORT(bp);
6977 u32 val;
6978
6979 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6980
6981 /* Do not rcv packets to BRB */
6982 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6983 /* Do not direct rcv packets that are not for MCP to the BRB */
6984 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6985 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6986
6987 /* Configure AEU */
6988 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6989
6990 msleep(100);
6991 /* Check for BRB port occupancy */
6992 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6993 if (val)
6994 DP(NETIF_MSG_IFDOWN,
33471629 6995 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6996
6997 /* TODO: Close Doorbell port? */
6998}
6999
34f80b04
EG
7000static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7001{
7002 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7003 BP_FUNC(bp), reset_code);
7004
7005 switch (reset_code) {
7006 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7007 bnx2x_reset_port(bp);
7008 bnx2x_reset_func(bp);
7009 bnx2x_reset_common(bp);
7010 break;
7011
7012 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7013 bnx2x_reset_port(bp);
7014 bnx2x_reset_func(bp);
7015 break;
7016
7017 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7018 bnx2x_reset_func(bp);
7019 break;
49d66772 7020
34f80b04
EG
7021 default:
7022 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7023 break;
7024 }
7025}
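/* Editor's note: the three unload codes nest in scope -- FUNCTION
 * resets only per-function state, PORT also resets the port blocks,
 * and COMMON (the last driver instance to leave) additionally resets
 * the chip-common logic; hence each case above calls the narrower
 * resets explicitly.
 */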
7026
33471629 7027/* must be called with rtnl_lock */
34f80b04 7028static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 7029{
da5a662a 7030 int port = BP_PORT(bp);
a2fbb9ea 7031 u32 reset_code = 0;
da5a662a 7032 int i, cnt, rc;
a2fbb9ea
ET
7033
7034 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7035
228241eb
ET
7036 bp->rx_mode = BNX2X_RX_MODE_NONE;
7037 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 7038
f8ef6e44 7039 bnx2x_netif_stop(bp, 1);
e94d8af3 7040
34f80b04
EG
7041 del_timer_sync(&bp->timer);
7042 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7043 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 7044 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 7045
70b9986c
EG
7046 /* Release IRQs */
7047 bnx2x_free_irq(bp);
7048
555f6c78
EG
7049 /* Wait until tx fastpath tasks complete */
7050 for_each_tx_queue(bp, i) {
228241eb
ET
7051 struct bnx2x_fastpath *fp = &bp->fp[i];
7052
34f80b04
EG
7053 cnt = 1000;
7054 smp_rmb();
e8b5fc51 7055 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 7056
65abd74d 7057 bnx2x_tx_int(fp, 1000);
34f80b04
EG
7058 if (!cnt) {
7059 BNX2X_ERR("timeout waiting for queue[%d]\n",
7060 i);
7061#ifdef BNX2X_STOP_ON_ERROR
7062 bnx2x_panic();
7063 return -EBUSY;
7064#else
7065 break;
7066#endif
7067 }
7068 cnt--;
da5a662a 7069 msleep(1);
34f80b04
EG
7070 smp_rmb();
7071 }
228241eb 7072 }
da5a662a
VZ
7073 /* Give HW time to discard old tx messages */
7074 msleep(1);
a2fbb9ea 7075
3101c2bc
YG
7076 if (CHIP_IS_E1(bp)) {
7077 struct mac_configuration_cmd *config =
7078 bnx2x_sp(bp, mcast_config);
7079
7080 bnx2x_set_mac_addr_e1(bp, 0);
7081
8d9c5f34 7082 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7083 CAM_INVALIDATE(config->config_table[i]);
7084
8d9c5f34 7085 config->hdr.length = i;
3101c2bc
YG
7086 if (CHIP_REV_IS_SLOW(bp))
7087 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7088 else
7089 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7090 config->hdr.client_id = BP_CL_ID(bp);
7091 config->hdr.reserved1 = 0;
7092
7093 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7094 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7095 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7096
7097 } else { /* E1H */
65abd74d
YG
7098 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7099
3101c2bc
YG
7100 bnx2x_set_mac_addr_e1h(bp, 0);
7101
7102 for (i = 0; i < MC_HASH_SIZE; i++)
7103 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7104 }
7105
65abd74d
YG
7106 if (unload_mode == UNLOAD_NORMAL)
7107 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7108
7109 else if (bp->flags & NO_WOL_FLAG) {
7110 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7111 if (CHIP_IS_E1H(bp))
7112 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7113
7114 } else if (bp->wol) {
7115 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7116 u8 *mac_addr = bp->dev->dev_addr;
7117 u32 val;
7118 /* The MAC address is written to entries 1-4 to
7119 preserve entry 0, which is used by the PMF */
7120 u8 entry = (BP_E1HVN(bp) + 1)*8;
7121
7122 val = (mac_addr[0] << 8) | mac_addr[1];
7123 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7124
7125 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7126 (mac_addr[4] << 8) | mac_addr[5];
7127 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
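/* Editor's note: a worked example of the packing above, for a
 * hypothetical MAC 00:10:18:ab:cd:ef --
 *	(0x00 << 8) | 0x10                               -> 0x00000010
 *	(0x18 << 24) | (0xab << 16) | (0xcd << 8) | 0xef -> 0x18abcdef
 * i.e. the first match word holds bytes 0-1, the second bytes 2-5.
 */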
7128
7129 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7130
7131 } else
7132 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7133
34f80b04
EG
7134 /* Close the multi and leading connections;
7135 completions for the ramrods are collected synchronously */
a2fbb9ea
ET
7136 for_each_nondefault_queue(bp, i)
7137 if (bnx2x_stop_multi(bp, i))
228241eb 7138 goto unload_error;
a2fbb9ea 7139
da5a662a
VZ
7140 rc = bnx2x_stop_leading(bp);
7141 if (rc) {
34f80b04 7142 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7143#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7144 return -EBUSY;
da5a662a
VZ
7145#else
7146 goto unload_error;
34f80b04 7147#endif
228241eb
ET
7148 }
7149
7150unload_error:
34f80b04 7151 if (!BP_NOMCP(bp))
228241eb 7152 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7153 else {
7154 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7155 load_count[0], load_count[1], load_count[2]);
7156 load_count[0]--;
da5a662a 7157 load_count[1 + port]--;
34f80b04
EG
7158 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7159 load_count[0], load_count[1], load_count[2]);
7160 if (load_count[0] == 0)
7161 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7162 else if (load_count[1 + port] == 0)
34f80b04
EG
7163 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7164 else
7165 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7166 }
a2fbb9ea 7167
34f80b04
EG
7168 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7169 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7170 bnx2x__link_reset(bp);
a2fbb9ea
ET
7171
7172 /* Reset the chip */
228241eb 7173 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7174
7175 /* Report UNLOAD_DONE to MCP */
34f80b04 7176 if (!BP_NOMCP(bp))
a2fbb9ea 7177 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 7178 bp->port.pmf = 0;
a2fbb9ea 7179
7a9b2557 7180 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7181 bnx2x_free_skbs(bp);
555f6c78 7182 for_each_rx_queue(bp, i)
3196a88a 7183 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7184 for_each_rx_queue(bp, i)
7cde1c8b 7185 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7186 bnx2x_free_mem(bp);
7187
7188 bp->state = BNX2X_STATE_CLOSED;
228241eb 7189
a2fbb9ea
ET
7190 netif_carrier_off(bp->dev);
7191
7192 return 0;
7193}
7194
34f80b04
EG
7195static void bnx2x_reset_task(struct work_struct *work)
7196{
7197 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7198
7199#ifdef BNX2X_STOP_ON_ERROR
7200 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7201 " so reset not done to allow debug dump,\n"
7202 KERN_ERR " you will need to reboot when done\n");
7203 return;
7204#endif
7205
7206 rtnl_lock();
7207
7208 if (!netif_running(bp->dev))
7209 goto reset_task_exit;
7210
7211 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7212 bnx2x_nic_load(bp, LOAD_NORMAL);
7213
7214reset_task_exit:
7215 rtnl_unlock();
7216}
7217
a2fbb9ea
ET
7218/* end of nic load/unload */
7219
7220/* ethtool_ops */
7221
7222/*
7223 * Init service functions
7224 */
7225
f1ef27ef
EG
7226static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7227{
7228 switch (func) {
7229 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7230 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7231 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7232 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7233 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7234 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7235 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7236 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7237 default:
7238 BNX2X_ERR("Unsupported function index: %d\n", func);
7239 return (u32)(-1);
7240 }
7241}
7242
7243static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7244{
7245 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7246
7247 /* Flush all outstanding writes */
7248 mmiowb();
7249
7250 /* Pretend to be function 0 */
7251 REG_WR(bp, reg, 0);
7252 /* Flush the GRC transaction (in the chip) */
7253 new_val = REG_RD(bp, reg);
7254 if (new_val != 0) {
7255 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7256 new_val);
7257 BUG();
7258 }
7259
7260 /* From now we are in the "like-E1" mode */
7261 bnx2x_int_disable(bp);
7262
7263 /* Flush all outstanding writes */
7264 mmiowb();
7265
7266 /* Restore the original function settings */
7267 REG_WR(bp, reg, orig_func);
7268 new_val = REG_RD(bp, reg);
7269 if (new_val != orig_func) {
7270 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7271 orig_func, new_val);
7272 BUG();
7273 }
7274}
7275
7276static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7277{
7278 if (CHIP_IS_E1H(bp))
7279 bnx2x_undi_int_disable_e1h(bp, func);
7280 else
7281 bnx2x_int_disable(bp);
7282}
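/* Editor's note: the "pretend" register makes subsequent GRC accesses
 * from this PCI function appear to come from another function, so the
 * sequence above disables interrupts as if we were function 0 and then
 * restores the original function.  The read-back-and-BUG() checks
 * guard against the pretend write not having reached the chip.
 */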
7283
34f80b04
EG
7284static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7285{
7286 u32 val;
7287
7288 /* Check if there is any driver already loaded */
7289 val = REG_RD(bp, MISC_REG_UNPREPARED);
7290 if (val == 0x1) {
7291 /* Check if it is the UNDI driver:
7292 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7293 */
4a37fb66 7294 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7295 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7296 if (val == 0x7) {
7297 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7298 /* save our func */
34f80b04 7299 int func = BP_FUNC(bp);
da5a662a
VZ
7300 u32 swap_en;
7301 u32 swap_val;
34f80b04 7302
b4661739
EG
7303 /* clear the UNDI indication */
7304 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7305
34f80b04
EG
7306 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7307
7308 /* try unload UNDI on port 0 */
7309 bp->func = 0;
da5a662a
VZ
7310 bp->fw_seq =
7311 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7312 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7313 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7314
7315 /* if UNDI is loaded on the other port */
7316 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7317
da5a662a
VZ
7318 /* send "DONE" for previous unload */
7319 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7320
7321 /* unload UNDI on port 1 */
34f80b04 7322 bp->func = 1;
da5a662a
VZ
7323 bp->fw_seq =
7324 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7325 DRV_MSG_SEQ_NUMBER_MASK);
7326 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7327
7328 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7329 }
7330
b4661739
EG
7331 /* now it's safe to release the lock */
7332 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7333
f1ef27ef 7334 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7335
7336 /* close input traffic and wait for it */
7337 /* Do not rcv packets to BRB */
7338 REG_WR(bp,
7339 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7340 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7341 /* Do not direct rcv packets that are not for MCP to
7342 * the BRB */
7343 REG_WR(bp,
7344 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7345 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7346 /* clear AEU */
7347 REG_WR(bp,
7348 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7349 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7350 msleep(10);
7351
7352 /* save NIG port swap info */
7353 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7354 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7355 /* reset device */
7356 REG_WR(bp,
7357 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7358 0xd3ffffff);
34f80b04
EG
7359 REG_WR(bp,
7360 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7361 0x1403);
da5a662a
VZ
7362 /* take the NIG out of reset and restore swap values */
7363 REG_WR(bp,
7364 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7365 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7366 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7367 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7368
7369 /* send unload done to the MCP */
7370 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7371
7372 /* restore our func and fw_seq */
7373 bp->func = func;
7374 bp->fw_seq =
7375 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7376 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7377
7378 } else
7379 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7380 }
7381}
7382
7383static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7384{
7385 u32 val, val2, val3, val4, id;
72ce58c3 7386 u16 pmc;
34f80b04
EG
7387
7388 /* Get the chip revision id and number. */
7389 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7390 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7391 id = ((val & 0xffff) << 16);
7392 val = REG_RD(bp, MISC_REG_CHIP_REV);
7393 id |= ((val & 0xf) << 12);
7394 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7395 id |= ((val & 0xff) << 4);
5a40e08e 7396 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7397 id |= (val & 0xf);
7398 bp->common.chip_id = id;
7399 bp->link_params.chip_id = bp->common.chip_id;
7400 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7401
1c06328c
EG
7402 val = (REG_RD(bp, 0x2874) & 0x55);
7403 if ((bp->common.chip_id & 0x1) ||
7404 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7405 bp->flags |= ONE_PORT_FLAG;
7406 BNX2X_DEV_INFO("single port device\n");
7407 }
7408
34f80b04
EG
7409 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7410 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7411 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7412 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7413 bp->common.flash_size, bp->common.flash_size);
7414
7415 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7416 bp->link_params.shmem_base = bp->common.shmem_base;
7417 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7418
7419 if (!bp->common.shmem_base ||
7420 (bp->common.shmem_base < 0xA0000) ||
7421 (bp->common.shmem_base >= 0xC0000)) {
7422 BNX2X_DEV_INFO("MCP not active\n");
7423 bp->flags |= NO_MCP_FLAG;
7424 return;
7425 }
7426
7427 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7428 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7429 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7430 BNX2X_ERR("BAD MCP validity signature\n");
7431
7432 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
35b19ba5 7433 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
34f80b04
EG
7434
7435 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7436 SHARED_HW_CFG_LED_MODE_MASK) >>
7437 SHARED_HW_CFG_LED_MODE_SHIFT);
7438
7439 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7440 bp->common.bc_ver = val;
7441 BNX2X_DEV_INFO("bc_ver %X\n", val);
7442 if (val < BNX2X_BC_VER) {
7443 /* for now only warn
7444 * later we might need to enforce this */
7445 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7446 " please upgrade BC\n", BNX2X_BC_VER, val);
7447 }
72ce58c3
EG
7448
7449 if (BP_E1HVN(bp) == 0) {
7450 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7451 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7452 } else {
7453 /* no WOL capability for E1HVN != 0 */
7454 bp->flags |= NO_WOL_FLAG;
7455 }
7456 BNX2X_DEV_INFO("%sWoL capable\n",
7457 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7458
7459 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7460 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7461 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7462 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7463
7464 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7465 val, val2, val3, val4);
7466}
7467
7468static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7469 u32 switch_cfg)
a2fbb9ea 7470{
34f80b04 7471 int port = BP_PORT(bp);
a2fbb9ea
ET
7472 u32 ext_phy_type;
7473
a2fbb9ea
ET
7474 switch (switch_cfg) {
7475 case SWITCH_CFG_1G:
7476 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7477
c18487ee
YR
7478 ext_phy_type =
7479 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7480 switch (ext_phy_type) {
7481 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7482 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7483 ext_phy_type);
7484
34f80b04
EG
7485 bp->port.supported |= (SUPPORTED_10baseT_Half |
7486 SUPPORTED_10baseT_Full |
7487 SUPPORTED_100baseT_Half |
7488 SUPPORTED_100baseT_Full |
7489 SUPPORTED_1000baseT_Full |
7490 SUPPORTED_2500baseX_Full |
7491 SUPPORTED_TP |
7492 SUPPORTED_FIBRE |
7493 SUPPORTED_Autoneg |
7494 SUPPORTED_Pause |
7495 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7496 break;
7497
7498 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7499 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7500 ext_phy_type);
7501
34f80b04
EG
7502 bp->port.supported |= (SUPPORTED_10baseT_Half |
7503 SUPPORTED_10baseT_Full |
7504 SUPPORTED_100baseT_Half |
7505 SUPPORTED_100baseT_Full |
7506 SUPPORTED_1000baseT_Full |
7507 SUPPORTED_TP |
7508 SUPPORTED_FIBRE |
7509 SUPPORTED_Autoneg |
7510 SUPPORTED_Pause |
7511 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7512 break;
7513
7514 default:
7515 BNX2X_ERR("NVRAM config error. "
7516 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7517 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7518 return;
7519 }
7520
34f80b04
EG
7521 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7522 port*0x10);
7523 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7524 break;
7525
7526 case SWITCH_CFG_10G:
7527 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7528
c18487ee
YR
7529 ext_phy_type =
7530 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7531 switch (ext_phy_type) {
7532 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7533 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7534 ext_phy_type);
7535
34f80b04
EG
7536 bp->port.supported |= (SUPPORTED_10baseT_Half |
7537 SUPPORTED_10baseT_Full |
7538 SUPPORTED_100baseT_Half |
7539 SUPPORTED_100baseT_Full |
7540 SUPPORTED_1000baseT_Full |
7541 SUPPORTED_2500baseX_Full |
7542 SUPPORTED_10000baseT_Full |
7543 SUPPORTED_TP |
7544 SUPPORTED_FIBRE |
7545 SUPPORTED_Autoneg |
7546 SUPPORTED_Pause |
7547 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7548 break;
7549
7550 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7551 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7552 ext_phy_type);
f1410647 7553
34f80b04
EG
7554 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7555 SUPPORTED_FIBRE |
7556 SUPPORTED_Pause |
7557 SUPPORTED_Asym_Pause);
f1410647
ET
7558 break;
7559
a2fbb9ea 7560 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7561 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7562 ext_phy_type);
7563
34f80b04
EG
7564 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7565 SUPPORTED_1000baseT_Full |
7566 SUPPORTED_FIBRE |
7567 SUPPORTED_Pause |
7568 SUPPORTED_Asym_Pause);
f1410647
ET
7569 break;
7570
7571 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7572 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7573 ext_phy_type);
7574
34f80b04
EG
7575 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7576 SUPPORTED_1000baseT_Full |
7577 SUPPORTED_FIBRE |
7578 SUPPORTED_Autoneg |
7579 SUPPORTED_Pause |
7580 SUPPORTED_Asym_Pause);
f1410647
ET
7581 break;
7582
c18487ee
YR
7583 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7584 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7585 ext_phy_type);
7586
34f80b04
EG
7587 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7588 SUPPORTED_2500baseX_Full |
7589 SUPPORTED_1000baseT_Full |
7590 SUPPORTED_FIBRE |
7591 SUPPORTED_Autoneg |
7592 SUPPORTED_Pause |
7593 SUPPORTED_Asym_Pause);
c18487ee
YR
7594 break;
7595
f1410647
ET
7596 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7597 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7598 ext_phy_type);
7599
34f80b04
EG
7600 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7601 SUPPORTED_TP |
7602 SUPPORTED_Autoneg |
7603 SUPPORTED_Pause |
7604 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7605 break;
7606
c18487ee
YR
7607 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7608 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7609 bp->link_params.ext_phy_config);
7610 break;
7611
a2fbb9ea
ET
7612 default:
7613 BNX2X_ERR("NVRAM config error. "
7614 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7615 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7616 return;
7617 }
7618
34f80b04
EG
7619 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7620 port*0x18);
7621 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7622
a2fbb9ea
ET
7623 break;
7624
7625 default:
7626 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7627 bp->port.link_config);
a2fbb9ea
ET
7628 return;
7629 }
34f80b04 7630 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7631
7632 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7633 if (!(bp->link_params.speed_cap_mask &
7634 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7635 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7636
c18487ee
YR
7637 if (!(bp->link_params.speed_cap_mask &
7638 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7639 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7640
c18487ee
YR
7641 if (!(bp->link_params.speed_cap_mask &
7642 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7643 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7644
c18487ee
YR
7645 if (!(bp->link_params.speed_cap_mask &
7646 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7647 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7648
c18487ee
YR
7649 if (!(bp->link_params.speed_cap_mask &
7650 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7651 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7652 SUPPORTED_1000baseT_Full);
a2fbb9ea 7653
c18487ee
YR
7654 if (!(bp->link_params.speed_cap_mask &
7655 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7656 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7657
c18487ee
YR
7658 if (!(bp->link_params.speed_cap_mask &
7659 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7660 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7661
34f80b04 7662 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7663}
7664
34f80b04 7665static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7666{
c18487ee 7667 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7668
34f80b04 7669 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7670 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7671 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7672 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7673 bp->port.advertising = bp->port.supported;
a2fbb9ea 7674 } else {
c18487ee
YR
7675 u32 ext_phy_type =
7676 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7677
7678 if ((ext_phy_type ==
7679 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7680 (ext_phy_type ==
7681 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7682 /* force 10G, no AN */
c18487ee 7683 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7684 bp->port.advertising =
a2fbb9ea
ET
7685 (ADVERTISED_10000baseT_Full |
7686 ADVERTISED_FIBRE);
7687 break;
7688 }
7689 BNX2X_ERR("NVRAM config error. "
7690 "Invalid link_config 0x%x"
7691 " Autoneg not supported\n",
34f80b04 7692 bp->port.link_config);
a2fbb9ea
ET
7693 return;
7694 }
7695 break;
7696
7697 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7698 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7699 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7700 bp->port.advertising = (ADVERTISED_10baseT_Full |
7701 ADVERTISED_TP);
a2fbb9ea
ET
7702 } else {
7703 BNX2X_ERR("NVRAM config error. "
7704 "Invalid link_config 0x%x"
7705 " speed_cap_mask 0x%x\n",
34f80b04 7706 bp->port.link_config,
c18487ee 7707 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7708 return;
7709 }
7710 break;
7711
7712 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7713 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7714 bp->link_params.req_line_speed = SPEED_10;
7715 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7716 bp->port.advertising = (ADVERTISED_10baseT_Half |
7717 ADVERTISED_TP);
a2fbb9ea
ET
7718 } else {
7719 BNX2X_ERR("NVRAM config error. "
7720 "Invalid link_config 0x%x"
7721 " speed_cap_mask 0x%x\n",
34f80b04 7722 bp->port.link_config,
c18487ee 7723 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7724 return;
7725 }
7726 break;
7727
7728 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7729 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7730 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7731 bp->port.advertising = (ADVERTISED_100baseT_Full |
7732 ADVERTISED_TP);
a2fbb9ea
ET
7733 } else {
7734 BNX2X_ERR("NVRAM config error. "
7735 "Invalid link_config 0x%x"
7736 " speed_cap_mask 0x%x\n",
34f80b04 7737 bp->port.link_config,
c18487ee 7738 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7739 return;
7740 }
7741 break;
7742
7743 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7744 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7745 bp->link_params.req_line_speed = SPEED_100;
7746 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7747 bp->port.advertising = (ADVERTISED_100baseT_Half |
7748 ADVERTISED_TP);
a2fbb9ea
ET
7749 } else {
7750 BNX2X_ERR("NVRAM config error. "
7751 "Invalid link_config 0x%x"
7752 " speed_cap_mask 0x%x\n",
34f80b04 7753 bp->port.link_config,
c18487ee 7754 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7755 return;
7756 }
7757 break;
7758
7759 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7760 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7761 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7762 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7763 ADVERTISED_TP);
a2fbb9ea
ET
7764 } else {
7765 BNX2X_ERR("NVRAM config error. "
7766 "Invalid link_config 0x%x"
7767 " speed_cap_mask 0x%x\n",
34f80b04 7768 bp->port.link_config,
c18487ee 7769 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7770 return;
7771 }
7772 break;
7773
7774 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7775 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7776 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7777 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7778 ADVERTISED_TP);
a2fbb9ea
ET
7779 } else {
7780 BNX2X_ERR("NVRAM config error. "
7781 "Invalid link_config 0x%x"
7782 " speed_cap_mask 0x%x\n",
34f80b04 7783 bp->port.link_config,
c18487ee 7784 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7785 return;
7786 }
7787 break;
7788
7789 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7790 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7791 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7792 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7793 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7794 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7795 ADVERTISED_FIBRE);
a2fbb9ea
ET
7796 } else {
7797 BNX2X_ERR("NVRAM config error. "
7798 "Invalid link_config 0x%x"
7799 " speed_cap_mask 0x%x\n",
34f80b04 7800 bp->port.link_config,
c18487ee 7801 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7802 return;
7803 }
7804 break;
7805
7806 default:
7807 BNX2X_ERR("NVRAM config error. "
7808 "BAD link speed link_config 0x%x\n",
34f80b04 7809 bp->port.link_config);
c18487ee 7810 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7811 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7812 break;
7813 }
a2fbb9ea 7814
34f80b04
EG
7815 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7816 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7817 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7818 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7819 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7820
c18487ee 7821 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7822 " advertising 0x%x\n",
c18487ee
YR
7823 bp->link_params.req_line_speed,
7824 bp->link_params.req_duplex,
34f80b04 7825 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7826}
7827
34f80b04 7828static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7829{
34f80b04
EG
7830 int port = BP_PORT(bp);
7831 u32 val, val2;
a2fbb9ea 7832
c18487ee 7833 bp->link_params.bp = bp;
34f80b04 7834 bp->link_params.port = port;
c18487ee 7835
c18487ee 7836 bp->link_params.serdes_config =
f1410647 7837 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7838 bp->link_params.lane_config =
a2fbb9ea 7839 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7840 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7841 SHMEM_RD(bp,
7842 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7843 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7844 SHMEM_RD(bp,
7845 dev_info.port_hw_config[port].speed_capability_mask);
7846
34f80b04 7847 bp->port.link_config =
a2fbb9ea
ET
7848 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7849
34f80b04
EG
7850 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7851 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7852 " link_config 0x%08x\n",
c18487ee
YR
7853 bp->link_params.serdes_config,
7854 bp->link_params.lane_config,
7855 bp->link_params.ext_phy_config,
34f80b04 7856 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7857
34f80b04 7858 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7859 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7860 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7861
7862 bnx2x_link_settings_requested(bp);
7863
7864 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7865 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7866 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7867 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7868 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7869 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7870 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7871 bp->dev->dev_addr[5] = (u8)(val & 0xff);
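/* Editor's note: the MAC lives in shmem as two words -- mac_upper
 * carries bytes 0-1 in its low 16 bits and mac_lower bytes 2-5.  With
 * hypothetical values val2 = 0x0010 and val = 0x18abcdef, the bytes
 * above reassemble to 00:10:18:ab:cd:ef.
 */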
c18487ee
YR
7872 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7873 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7874}
7875
7876static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7877{
7878 int func = BP_FUNC(bp);
7879 u32 val, val2;
7880 int rc = 0;
a2fbb9ea 7881
34f80b04 7882 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7883
34f80b04
EG
7884 bp->e1hov = 0;
7885 bp->e1hmf = 0;
7886 if (CHIP_IS_E1H(bp)) {
7887 bp->mf_config =
7888 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7889
3196a88a
EG
7890 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7891 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7892 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7893
34f80b04
EG
7894 bp->e1hov = val;
7895 bp->e1hmf = 1;
7896 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7897 "(0x%04x)\n",
7898 func, bp->e1hov, bp->e1hov);
7899 } else {
7900 BNX2X_DEV_INFO("Single function mode\n");
7901 if (BP_E1HVN(bp)) {
7902 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7903 " aborting\n", func);
7904 rc = -EPERM;
7905 }
7906 }
7907 }
a2fbb9ea 7908
34f80b04
EG
7909 if (!BP_NOMCP(bp)) {
7910 bnx2x_get_port_hwinfo(bp);
7911
7912 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7913 DRV_MSG_SEQ_NUMBER_MASK);
7914 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7915 }
7916
7917 if (IS_E1HMF(bp)) {
7918 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7919 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7920 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7921 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7922 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7923 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7924 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7925 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7926 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7927 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7928 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7929 ETH_ALEN);
7930 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7931 ETH_ALEN);
a2fbb9ea 7932 }
34f80b04
EG
7933
7934 return rc;
a2fbb9ea
ET
7935 }
7936
34f80b04
EG
7937 if (BP_NOMCP(bp)) {
7938 /* only supposed to happen on emulation/FPGA */
33471629 7939 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7940 random_ether_addr(bp->dev->dev_addr);
7941 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7942 }
a2fbb9ea 7943
34f80b04
EG
7944 return rc;
7945}
7946
7947static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7948{
7949 int func = BP_FUNC(bp);
7950 int rc;
7951
da5a662a
VZ
7952 /* Disable interrupt handling until HW is initialized */
7953 atomic_set(&bp->intr_sem, 1);
7954
34f80b04 7955 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7956
1cf167f2 7957 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7958 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7959
7960 rc = bnx2x_get_hwinfo(bp);
7961
7962 /* need to reset chip if undi was active */
7963 if (!BP_NOMCP(bp))
7964 bnx2x_undi_unload(bp);
7965
7966 if (CHIP_REV_IS_FPGA(bp))
7967 printk(KERN_ERR PFX "FPGA detected\n");
7968
7969 if (BP_NOMCP(bp) && (func == 0))
7970 printk(KERN_ERR PFX
7971 "MCP disabled, must load devices in order!\n");
7972
555f6c78 7973 /* Set multi queue mode */
8badd27a
EG
7974 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7975 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 7976 printk(KERN_ERR PFX
8badd27a 7977 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
7978 multi_mode = ETH_RSS_MODE_DISABLED;
7979 }
7980 bp->multi_mode = multi_mode;
7981
7982
7a9b2557
VZ
7983 /* Set TPA flags */
7984 if (disable_tpa) {
7985 bp->flags &= ~TPA_ENABLE_FLAG;
7986 bp->dev->features &= ~NETIF_F_LRO;
7987 } else {
7988 bp->flags |= TPA_ENABLE_FLAG;
7989 bp->dev->features |= NETIF_F_LRO;
7990 }
7991
7992
34f80b04
EG
7993 bp->tx_ring_size = MAX_TX_AVAIL;
7994 bp->rx_ring_size = MAX_RX_AVAIL;
7995
7996 bp->rx_csum = 1;
7997 bp->rx_offset = 0;
7998
7999 bp->tx_ticks = 50;
8000 bp->rx_ticks = 25;
8001
34f80b04
EG
8002 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8003 bp->current_interval = (poll ? poll : bp->timer_interval);
8004
8005 init_timer(&bp->timer);
8006 bp->timer.expires = jiffies + bp->current_interval;
8007 bp->timer.data = (unsigned long) bp;
8008 bp->timer.function = bnx2x_timer;
8009
8010 return rc;
a2fbb9ea
ET
8011}
8012
8013/*
8014 * ethtool service functions
8015 */
8016
8017/* All ethtool functions called with rtnl_lock */
8018
8019static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8020{
8021 struct bnx2x *bp = netdev_priv(dev);
8022
34f80b04
EG
8023 cmd->supported = bp->port.supported;
8024 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
8025
8026 if (netif_carrier_ok(dev)) {
c18487ee
YR
8027 cmd->speed = bp->link_vars.line_speed;
8028 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 8029 } else {
c18487ee
YR
8030 cmd->speed = bp->link_params.req_line_speed;
8031 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 8032 }
34f80b04
EG
8033 if (IS_E1HMF(bp)) {
8034 u16 vn_max_rate;
8035
8036 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8037 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8038 if (vn_max_rate < cmd->speed)
8039 cmd->speed = vn_max_rate;
8040 }
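/* Editor's note: in multi-function mode the per-VN maximum bandwidth
 * is kept in units of 100 Mb/s, so a hypothetical mf_config field of
 * 50 yields vn_max_rate = 5000 and clamps the reported speed to
 * 5 Gb/s even while the physical link runs at 10 Gb/s.
 */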
a2fbb9ea 8041
c18487ee
YR
8042 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8043 u32 ext_phy_type =
8044 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
8045
8046 switch (ext_phy_type) {
8047 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8048 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8049 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8050 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 8051 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
8052 cmd->port = PORT_FIBRE;
8053 break;
8054
8055 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8056 cmd->port = PORT_TP;
8057 break;
8058
c18487ee
YR
8059 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8060 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8061 bp->link_params.ext_phy_config);
8062 break;
8063
f1410647
ET
8064 default:
8065 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
8066 bp->link_params.ext_phy_config);
8067 break;
f1410647
ET
8068 }
8069 } else
a2fbb9ea 8070 cmd->port = PORT_TP;
a2fbb9ea 8071
34f80b04 8072 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8073 cmd->transceiver = XCVR_INTERNAL;
8074
c18487ee 8075 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8076 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8077 else
a2fbb9ea 8078 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8079
8080 cmd->maxtxpkt = 0;
8081 cmd->maxrxpkt = 0;
8082
8083 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8084 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8085 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8086 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8087 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8088 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8089 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8090
8091 return 0;
8092}
8093
8094static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8095{
8096 struct bnx2x *bp = netdev_priv(dev);
8097 u32 advertising;
8098
34f80b04
EG
8099 if (IS_E1HMF(bp))
8100 return 0;
8101
a2fbb9ea
ET
8102 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8103 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8104 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8105 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8106 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8107 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8108 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8109
a2fbb9ea 8110 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8111 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8112 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8113 return -EINVAL;
f1410647 8114 }
a2fbb9ea
ET
8115
8116 /* advertise the requested speed and duplex if supported */
34f80b04 8117 cmd->advertising &= bp->port.supported;
a2fbb9ea 8118
c18487ee
YR
8119 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8120 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8121 bp->port.advertising |= (ADVERTISED_Autoneg |
8122 cmd->advertising);
a2fbb9ea
ET
8123
8124 } else { /* forced speed */
8125 /* advertise the requested speed and duplex if supported */
8126 switch (cmd->speed) {
8127 case SPEED_10:
8128 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8129 if (!(bp->port.supported &
f1410647
ET
8130 SUPPORTED_10baseT_Full)) {
8131 DP(NETIF_MSG_LINK,
8132 "10M full not supported\n");
a2fbb9ea 8133 return -EINVAL;
f1410647 8134 }
a2fbb9ea
ET
8135
8136 advertising = (ADVERTISED_10baseT_Full |
8137 ADVERTISED_TP);
8138 } else {
34f80b04 8139 if (!(bp->port.supported &
f1410647
ET
8140 SUPPORTED_10baseT_Half)) {
8141 DP(NETIF_MSG_LINK,
8142 "10M half not supported\n");
a2fbb9ea 8143 return -EINVAL;
f1410647 8144 }
a2fbb9ea
ET
8145
8146 advertising = (ADVERTISED_10baseT_Half |
8147 ADVERTISED_TP);
8148 }
8149 break;
8150
8151 case SPEED_100:
8152 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8153 if (!(bp->port.supported &
f1410647
ET
8154 SUPPORTED_100baseT_Full)) {
8155 DP(NETIF_MSG_LINK,
8156 "100M full not supported\n");
a2fbb9ea 8157 return -EINVAL;
f1410647 8158 }
a2fbb9ea
ET
8159
8160 advertising = (ADVERTISED_100baseT_Full |
8161 ADVERTISED_TP);
8162 } else {
34f80b04 8163 if (!(bp->port.supported &
f1410647
ET
8164 SUPPORTED_100baseT_Half)) {
8165 DP(NETIF_MSG_LINK,
8166 "100M half not supported\n");
a2fbb9ea 8167 return -EINVAL;
f1410647 8168 }
a2fbb9ea
ET
8169
8170 advertising = (ADVERTISED_100baseT_Half |
8171 ADVERTISED_TP);
8172 }
8173 break;
8174
8175 case SPEED_1000:
f1410647
ET
8176 if (cmd->duplex != DUPLEX_FULL) {
8177 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8178 return -EINVAL;
f1410647 8179 }
a2fbb9ea 8180
34f80b04 8181 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8182 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8183 return -EINVAL;
f1410647 8184 }
a2fbb9ea
ET
8185
8186 advertising = (ADVERTISED_1000baseT_Full |
8187 ADVERTISED_TP);
8188 break;
8189
8190 case SPEED_2500:
f1410647
ET
8191 if (cmd->duplex != DUPLEX_FULL) {
8192 DP(NETIF_MSG_LINK,
8193 "2.5G half not supported\n");
a2fbb9ea 8194 return -EINVAL;
f1410647 8195 }
a2fbb9ea 8196
34f80b04 8197 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8198 DP(NETIF_MSG_LINK,
8199 "2.5G full not supported\n");
a2fbb9ea 8200 return -EINVAL;
f1410647 8201 }
a2fbb9ea 8202
f1410647 8203 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8204 ADVERTISED_TP);
8205 break;
8206
8207 case SPEED_10000:
f1410647
ET
8208 if (cmd->duplex != DUPLEX_FULL) {
8209 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8210 return -EINVAL;
f1410647 8211 }
a2fbb9ea 8212
34f80b04 8213 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8214 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8215 return -EINVAL;
f1410647 8216 }
a2fbb9ea
ET
8217
8218 advertising = (ADVERTISED_10000baseT_Full |
8219 ADVERTISED_FIBRE);
8220 break;
8221
8222 default:
f1410647 8223 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8224 return -EINVAL;
8225 }
8226
c18487ee
YR
8227 bp->link_params.req_line_speed = cmd->speed;
8228 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8229 bp->port.advertising = advertising;
a2fbb9ea
ET
8230 }
8231
c18487ee 8232 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8233 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8234 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8235 bp->port.advertising);
a2fbb9ea 8236
34f80b04 8237 if (netif_running(dev)) {
bb2a0f7a 8238 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8239 bnx2x_link_set(bp);
8240 }
a2fbb9ea
ET
8241
8242 return 0;
8243}
8244
c18487ee
YR
8245#define PHY_FW_VER_LEN 10
8246
a2fbb9ea
ET
8247static void bnx2x_get_drvinfo(struct net_device *dev,
8248 struct ethtool_drvinfo *info)
8249{
8250 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8251 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8252
8253 strcpy(info->driver, DRV_MODULE_NAME);
8254 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8255
8256 phy_fw_ver[0] = '\0';
34f80b04 8257 if (bp->port.pmf) {
4a37fb66 8258 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8259 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8260 (bp->state != BNX2X_STATE_CLOSED),
8261 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8262 bnx2x_release_phy_lock(bp);
34f80b04 8263 }
c18487ee 8264
f0e53a84
EG
8265 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8266 (bp->common.bc_ver & 0xff0000) >> 16,
8267 (bp->common.bc_ver & 0xff00) >> 8,
8268 (bp->common.bc_ver & 0xff),
8269 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8270 strcpy(info->bus_info, pci_name(bp->pdev));
8271 info->n_stats = BNX2X_NUM_STATS;
8272 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8273 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8274 info->regdump_len = 0;
8275}
8276
8277static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8278{
8279 struct bnx2x *bp = netdev_priv(dev);
8280
8281 if (bp->flags & NO_WOL_FLAG) {
8282 wol->supported = 0;
8283 wol->wolopts = 0;
8284 } else {
8285 wol->supported = WAKE_MAGIC;
8286 if (bp->wol)
8287 wol->wolopts = WAKE_MAGIC;
8288 else
8289 wol->wolopts = 0;
8290 }
8291 memset(&wol->sopass, 0, sizeof(wol->sopass));
8292}
8293
8294static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8295{
8296 struct bnx2x *bp = netdev_priv(dev);
8297
8298 if (wol->wolopts & ~WAKE_MAGIC)
8299 return -EINVAL;
8300
8301 if (wol->wolopts & WAKE_MAGIC) {
8302 if (bp->flags & NO_WOL_FLAG)
8303 return -EINVAL;
8304
8305 bp->wol = 1;
34f80b04 8306 } else
a2fbb9ea 8307 bp->wol = 0;
34f80b04 8308
a2fbb9ea
ET
8309 return 0;
8310}
8311
8312static u32 bnx2x_get_msglevel(struct net_device *dev)
8313{
8314 struct bnx2x *bp = netdev_priv(dev);
8315
8316 return bp->msglevel;
8317}
8318
8319static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8320{
8321 struct bnx2x *bp = netdev_priv(dev);
8322
8323 if (capable(CAP_NET_ADMIN))
8324 bp->msglevel = level;
8325}
8326
8327static int bnx2x_nway_reset(struct net_device *dev)
8328{
8329 struct bnx2x *bp = netdev_priv(dev);
8330
34f80b04
EG
8331 if (!bp->port.pmf)
8332 return 0;
a2fbb9ea 8333
34f80b04 8334 if (netif_running(dev)) {
bb2a0f7a 8335 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8336 bnx2x_link_set(bp);
8337 }
a2fbb9ea
ET
8338
8339 return 0;
8340}
8341
8342static int bnx2x_get_eeprom_len(struct net_device *dev)
8343{
8344 struct bnx2x *bp = netdev_priv(dev);
8345
34f80b04 8346 return bp->common.flash_size;
a2fbb9ea
ET
8347}
8348
8349static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8350{
34f80b04 8351 int port = BP_PORT(bp);
a2fbb9ea
ET
8352 int count, i;
8353 u32 val = 0;
8354
8355 /* adjust timeout for emulation/FPGA */
8356 count = NVRAM_TIMEOUT_COUNT;
8357 if (CHIP_REV_IS_SLOW(bp))
8358 count *= 100;
8359
8360 /* request access to nvram interface */
8361 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8362 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8363
8364 for (i = 0; i < count*10; i++) {
8365 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8366 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8367 break;
8368
8369 udelay(5);
8370 }
8371
8372 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8373 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8374 return -EBUSY;
8375 }
8376
8377 return 0;
8378}
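/* Editor's note: the NVRAM arbiter is per-port request/grant -- we set
 * the REQ bit for our port and poll until the matching ARB (grant) bit
 * appears, with the timeout scaled up 100x on slow emulation/FPGA
 * platforms; bnx2x_release_nvram_lock() below clears the request the
 * same way.
 */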
8379
8380static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8381{
34f80b04 8382 int port = BP_PORT(bp);
a2fbb9ea
ET
8383 int count, i;
8384 u32 val = 0;
8385
8386 /* adjust timeout for emulation/FPGA */
8387 count = NVRAM_TIMEOUT_COUNT;
8388 if (CHIP_REV_IS_SLOW(bp))
8389 count *= 100;
8390
8391 /* relinquish nvram interface */
8392 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8393 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8394
8395 for (i = 0; i < count*10; i++) {
8396 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8397 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8398 break;
8399
8400 udelay(5);
8401 }
8402
8403 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8404 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8405 return -EBUSY;
8406 }
8407
8408 return 0;
8409}
8410
8411static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8412{
8413 u32 val;
8414
8415 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8416
8417 /* enable both bits, even on read */
8418 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8419 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8420 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8421}
8422
8423static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8424{
8425 u32 val;
8426
8427 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8428
8429 /* disable both bits, even after read */
8430 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8431 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8432 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8433}
8434
static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
				  u32 cmd_flags)
{
	int count, i, rc;
	u32 val;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* address of the NVRAM to read from */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue a read command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	*ret_val = 0;
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);

		if (val & MCPR_NVM_COMMAND_DONE) {
			val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
			/* we read nvram data in cpu order
			 * but ethtool sees it as an array of bytes
			 * converting to big-endian will do the work */
			val = cpu_to_be32(val);
			*ret_val = val;
			rc = 0;
			break;
		}
	}

	return rc;
}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))

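/* bnx2x_nvram_write1() below serves the single-byte writes issued by
 * ethtool: it reads the aligned dword containing the byte, patches just
 * that byte and writes the dword back.  BYTE_OFFSET() yields the bit
 * shift of the byte within its dword, e.g. offset 0x102 ->
 * 8 * (0x102 & 3) = 16.
 */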
static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

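/* For multi-dword writes the FIRST/LAST command flags frame each NVRAM
 * page: LAST is raised on the final dword of the buffer and on the last
 * dword of a page, FIRST on the first dword of the following page, so
 * no burst ever crosses a page boundary.
 */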
static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
		   " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859) upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

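/* The setter below clamps the requested coalescing timeouts (note the
 * asymmetric bounds: decimal 3000 for Rx vs 0x3000 for Tx) and
 * re-programs the HW only while the interface is running.
 */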
static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled. Otherwise all
	   TPA'ed packets will be discarded due to wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

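/* The register test walks reg_tbl twice, writing 0x00000000 and then
 * 0xffffffff: offset0 is the port 0 address, offset1 the per-port
 * stride (port 1 tests offset0 + offset1), and mask covers the writable
 * bits that must read back unchanged.
 */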
static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify the value is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

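/* The memory test reads back every word of each block in mem_tbl
 * (presumably so that any parity error gets latched) and then checks
 * the parity status registers, masking out the bits that are ignored
 * on E1/E1H respectively.
 */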
static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		u16 cnt = 1000;
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		/* wait until link state is restored */
		if (link_up)
			while (cnt-- && bnx2x_test_link(&bp->link_params,
							&bp->link_vars))
				msleep(10);
	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

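/* Each region in nvram_tbl is stored together with its CRC32, so
 * running ether_crc_le() over data-plus-CRC of an intact region yields
 * the well-known CRC-32 residual 0xdebb20e3; any other value indicates
 * corruption.
 */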
#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}

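/* Result slots in buf[] line up with bnx2x_tests_str_arr above:
 * 0 registers, 1 memory, 2 loopback, 3 nvram, 4 interrupt, 5 link.
 * The offline tests (0-2) need a full unload/reload in diagnostic mode
 * and are therefore skipped in E1H multi-function mode.
 */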
static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
					8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
					8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
					8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
					8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
					8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
		(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

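/* In multi-queue mode the ethtool stats layout is one BNX2X_NUM_Q_STATS
 * block per queue (the "[%d]: ..." strings) followed by the global
 * bnx2x_stats_arr entries; in single-queue mode only the global array
 * is exposed, with port-only stats filtered out in E1H multi-function
 * mode.
 */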
static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

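/* Counters live as 32-bit words in the HW stats blocks: 8-byte entries
 * span two consecutive words that HILO_U64() folds into one u64, while
 * size 0 marks a counter that is not maintained and is reported as 0.
 */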
static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

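/* The last entry of each RCQ page appears to hold a next-page pointer
 * rather than a completion, so a status-block consumer index that lands
 * on MAX_RCQ_DESC_CNT is bumped past it before being compared with
 * rx_comp_cons.
 */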
static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}


/* we split the first BD into headers and data BDs
 * to ease the pain of our fellow microcode engineers
 * we use one mapping for both BDs
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}

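/* bnx2x_csum_fix() compensates for a HW checksum that was computed from
 * a start offset differing from the transport header by 'fix' bytes:
 * the partial sum over the skipped (or extra) bytes is subtracted or
 * added before the result is byte-swapped for the parsing BD.
 */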
static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}

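/* bnx2x_xmit_type() condenses an skb into XMIT_* flag bits (plain,
 * v4/v6 checksum offload plus XMIT_CSUM_TCP for TCP, and the GSO
 * variants), which the transmit path uses to decide whether a parsing
 * BD and the LSO fields are needed.
 */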
static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
			  skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* in non-LSO too fragmented packet should always
			   be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif

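/* The window check above guards a FW limit: no more than MAX_FETCH_BD
 * BDs can be fetched per packet, so for an LSO skb every window of
 * (MAX_FETCH_BD - 3) consecutive frags must cover at least one MSS;
 * otherwise the skb is linearized before transmission.
 */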
10038/* called with netif_tx_lock
a2fbb9ea 10039 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10040 * netif_wake_queue()
a2fbb9ea
ET
10041 */
10042static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10043{
10044 struct bnx2x *bp = netdev_priv(dev);
10045 struct bnx2x_fastpath *fp;
555f6c78 10046 struct netdev_queue *txq;
a2fbb9ea
ET
10047 struct sw_tx_bd *tx_buf;
10048 struct eth_tx_bd *tx_bd;
10049 struct eth_tx_parse_bd *pbd = NULL;
10050 u16 pkt_prod, bd_prod;
755735eb 10051 int nbd, fp_index;
a2fbb9ea 10052 dma_addr_t mapping;
755735eb
EG
10053 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10054 int vlan_off = (bp->e1hov ? 4 : 0);
10055 int i;
10056 u8 hlen = 0;
a2fbb9ea
ET
10057
10058#ifdef BNX2X_STOP_ON_ERROR
10059 if (unlikely(bp->panic))
10060 return NETDEV_TX_BUSY;
10061#endif
10062
555f6c78
EG
10063 fp_index = skb_get_queue_mapping(skb);
10064 txq = netdev_get_tx_queue(dev, fp_index);
10065
a2fbb9ea 10066 fp = &bp->fp[fp_index];
755735eb 10067
231fd58a 10068 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10069 fp->eth_q_stats.driver_xoff++,
555f6c78 10070 netif_tx_stop_queue(txq);
a2fbb9ea
ET
10071 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10072 return NETDEV_TX_BUSY;
10073 }
10074
755735eb
EG
10075 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10076 " gso type %x xmit_type %x\n",
10077 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10078 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10079
632da4d6 10080#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 10081 /* First, check if we need to linearize the skb
755735eb
EG
10082 (due to FW restrictions) */
10083 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10084 /* Statistics of linearization */
10085 bp->lin_cnt++;
10086 if (skb_linearize(skb) != 0) {
10087 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10088 "silently dropping this SKB\n");
10089 dev_kfree_skb_any(skb);
da5a662a 10090 return NETDEV_TX_OK;
755735eb
EG
10091 }
10092 }
632da4d6 10093#endif
755735eb 10094
a2fbb9ea 10095 /*
755735eb 10096 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10097 then for TSO or xsum we have a parsing info BD,
755735eb 10098 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10099 (don't forget to mark the last one as last,
10100 and to unmap only AFTER you write to the BD ...)
755735eb 10101 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10102 */
10103
10104 pkt_prod = fp->tx_pkt_prod++;
755735eb 10105 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10106
755735eb 10107 /* get a tx_buf and first BD */
a2fbb9ea
ET
10108 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10109 tx_bd = &fp->tx_desc_ring[bd_prod];
10110
10111 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10112 tx_bd->general_data = (UNICAST_ADDRESS <<
10113 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10114 /* header nbd */
10115 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10116
755735eb
EG
10117 /* remember the first BD of the packet */
10118 tx_buf->first_bd = fp->tx_bd_prod;
10119 tx_buf->skb = skb;
a2fbb9ea
ET
10120
10121 DP(NETIF_MSG_TX_QUEUED,
10122 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10123 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10124
0c6671b0
EG
10125#ifdef BCM_VLAN
10126 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10127 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10128 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10129 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10130 vlan_off += 4;
10131 } else
0c6671b0 10132#endif
755735eb 10133 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10134
755735eb 10135 if (xmit_type) {
755735eb 10136 /* turn on parsing and get a BD */
a2fbb9ea
ET
10137 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10138 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10139
10140 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10141 }
10142
10143 if (xmit_type & XMIT_CSUM) {
10144 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10145
10146 /* for now NS flag is not used in Linux */
755735eb 10147 pbd->global_data = (hlen |
96fc1784 10148 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 10149 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10150
755735eb
EG
10151 pbd->ip_hlen = (skb_transport_header(skb) -
10152 skb_network_header(skb)) / 2;
10153
10154 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10155
755735eb
EG
10156 pbd->total_hlen = cpu_to_le16(hlen);
10157 hlen = hlen*2 - vlan_off;
a2fbb9ea 10158
755735eb
EG
10159 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10160
10161 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10162 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10163 ETH_TX_BD_FLAGS_IP_CSUM;
10164 else
10165 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10166
10167 if (xmit_type & XMIT_CSUM_TCP) {
10168 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10169
10170 } else {
10171 s8 fix = SKB_CS_OFF(skb); /* signed! */
10172
a2fbb9ea 10173 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10174 pbd->cs_offset = fix / 2;
a2fbb9ea 10175
755735eb
EG
10176 DP(NETIF_MSG_TX_QUEUED,
10177 "hlen %d offset %d fix %d csum before fix %x\n",
10178 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10179 SKB_CS(skb));
10180
10181 /* HW bug: fixup the CSUM */
10182 pbd->tcp_pseudo_csum =
10183 bnx2x_csum_fix(skb_transport_header(skb),
10184 SKB_CS(skb), fix);
10185
10186 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10187 pbd->tcp_pseudo_csum);
10188 }
a2fbb9ea
ET
10189 }
10190
10191 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10192 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10193
10194 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10195 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10196 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10197 tx_bd->nbd = cpu_to_le16(nbd);
10198 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10199
10200 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10201 " nbytes %d flags %x vlan %x\n",
10202 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10203 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10204 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10205
755735eb 10206 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10207
10208 DP(NETIF_MSG_TX_QUEUED,
10209 "TSO packet len %d hlen %d total len %d tso size %d\n",
10210 skb->len, hlen, skb_headlen(skb),
10211 skb_shinfo(skb)->gso_size);
10212
10213 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10214
755735eb
EG
10215 if (unlikely(skb_headlen(skb) > hlen))
10216 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10217 bd_prod, ++nbd);
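	/* if the linear data extends past the headers, the first BD is
	 * split so the headers sit in a BD of their own, presumably so
	 * the FW can replicate them for every LSO segment
	 */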
a2fbb9ea
ET
10218
10219 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10220 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10221 pbd->tcp_flags = pbd_tcp_flags(skb);
10222
10223 if (xmit_type & XMIT_GSO_V4) {
10224 pbd->ip_id = swab16(ip_hdr(skb)->id);
10225 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10226 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10227 ip_hdr(skb)->daddr,
10228 0, IPPROTO_TCP, 0));
755735eb
EG
10229
10230 } else
10231 pbd->tcp_pseudo_csum =
10232 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10233 &ipv6_hdr(skb)->daddr,
10234 0, IPPROTO_TCP, 0));
10235
a2fbb9ea
ET
10236 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10237 }
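	/* the pseudo checksum above is computed with a zero length and
	 * PSEUDO_CS_WITHOUT_LEN is set because every LSO segment has its
	 * own length; the HW/FW is expected to add it per segment
	 */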
10238
755735eb
EG
10239 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10240 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10241
755735eb
EG
10242 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10243 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10244
755735eb
EG
10245 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10246 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10247
755735eb
EG
10248 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10249 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10250 tx_bd->nbytes = cpu_to_le16(frag->size);
10251 tx_bd->vlan = cpu_to_le16(pkt_prod);
10252 tx_bd->bd_flags.as_bitfield = 0;
a2fbb9ea 10253
755735eb
EG
10254 DP(NETIF_MSG_TX_QUEUED,
10255 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10256 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10257 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
a2fbb9ea
ET
10258 }
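	/* each fragment got a BD of its own above; only the first BD
	 * carries the start flag and the nbd count, and the last one is
	 * marked just below
	 */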
10259
755735eb 10260 /* now at last mark the BD as the last BD */
a2fbb9ea
ET
10261 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10262
10263 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10264 tx_bd, tx_bd->bd_flags.as_bitfield);
10265
a2fbb9ea
ET
10266 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10267
755735eb 10268 /* now send a tx doorbell, counting the next-page pointer BD
a2fbb9ea
ET
10269 * as well if the packet's BDs include or end on it
10270 */
10271 if (TX_BD_POFF(bd_prod) < nbd)
10272 nbd++;
10273
10274 if (pbd)
10275 DP(NETIF_MSG_TX_QUEUED,
10276 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10277 " tcp_flags %x xsum %x seq %u hlen %u\n",
10278 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10279 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
755735eb 10280 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
a2fbb9ea 10281
755735eb 10282 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
a2fbb9ea 10283
58f4c4cf
EG
10284 /*
10285 * Make sure that the BD data is updated before updating the producer
10286 * since FW might read the BD right after the producer is updated.
10287 * This is only applicable for weak-ordered memory model archs such
10288 * as IA-64. The following barrier is also mandatory since the FW
10289 * assumes packets must have BDs.
10290 */
10291 wmb();
10292
96fc1784
ET
10293 fp->hw_tx_prods->bds_prod =
10294 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
a2fbb9ea 10295 mb(); /* FW restriction: must not reorder writing nbd and packets */
96fc1784
ET
10296 fp->hw_tx_prods->packets_prod =
10297 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
755735eb 10298 DOORBELL(bp, FP_IDX(fp), 0);
a2fbb9ea
ET
10299
10300 mmiowb();
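	/* mmiowb() keeps the doorbell MMIO write ordered ahead of writes
	 * from another CPU that subsequently takes the Tx lock (needed on
	 * platforms such as ia64)
	 */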
10301
755735eb 10302 fp->tx_bd_prod += nbd;
a2fbb9ea
ET
10303 dev->trans_start = jiffies;
10304
10305 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
58f4c4cf
EG
10306 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10307 if we put Tx into XOFF state. */
10308 smp_mb();
555f6c78 10309 netif_tx_stop_queue(txq);
de832a55 10310 fp->eth_q_stats.driver_xoff++;
a2fbb9ea 10311 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
555f6c78 10312 netif_tx_wake_queue(txq);
a2fbb9ea
ET
10313 }
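	/* stop-then-recheck avoids a race with bnx2x_tx_int(): a Tx
	 * completion between the availability test and the stop could
	 * otherwise leave the queue stopped forever
	 */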
10314 fp->tx_pkt++;
10315
10316 return NETDEV_TX_OK;
10317}
10318
bb2a0f7a 10319/* called with rtnl_lock */
a2fbb9ea
ET
10320static int bnx2x_open(struct net_device *dev)
10321{
10322 struct bnx2x *bp = netdev_priv(dev);
10323
6eccabb3
EG
10324 netif_carrier_off(dev);
10325
a2fbb9ea
ET
10326 bnx2x_set_power_state(bp, PCI_D0);
10327
bb2a0f7a 10328 return bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea
ET
10329}
10330
bb2a0f7a 10331/* called with rtnl_lock */
a2fbb9ea
ET
10332static int bnx2x_close(struct net_device *dev)
10333{
a2fbb9ea
ET
10334 struct bnx2x *bp = netdev_priv(dev);
10335
10336 /* Unload the driver, release IRQs */
bb2a0f7a
YG
10337 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10338 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10339 if (!CHIP_REV_IS_SLOW(bp))
10340 bnx2x_set_power_state(bp, PCI_D3hot);
a2fbb9ea
ET
10341
10342 return 0;
10343}
10344
34f80b04
EG
10345/* called with netif_tx_lock from set_multicast */
10346static void bnx2x_set_rx_mode(struct net_device *dev)
10347{
10348 struct bnx2x *bp = netdev_priv(dev);
10349 u32 rx_mode = BNX2X_RX_MODE_NORMAL;
10350 int port = BP_PORT(bp);
10351
10352 if (bp->state != BNX2X_STATE_OPEN) {
10353 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
10354 return;
10355 }
10356
10357 DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
10358
10359 if (dev->flags & IFF_PROMISC)
10360 rx_mode = BNX2X_RX_MODE_PROMISC;
10361
10362 else if ((dev->flags & IFF_ALLMULTI) ||
10363 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
10364 rx_mode = BNX2X_RX_MODE_ALLMULTI;
10365
10366 else { /* some multicasts */
10367 if (CHIP_IS_E1(bp)) {
10368 int i, old, offset;
10369 struct dev_mc_list *mclist;
10370 struct mac_configuration_cmd *config =
10371 bnx2x_sp(bp, mcast_config);
10372
10373 for (i = 0, mclist = dev->mc_list;
10374 mclist && (i < dev->mc_count);
10375 i++, mclist = mclist->next) {
10376
10377 config->config_table[i].
10378 cam_entry.msb_mac_addr =
10379 swab16(*(u16 *)&mclist->dmi_addr[0]);
10380 config->config_table[i].
10381 cam_entry.middle_mac_addr =
10382 swab16(*(u16 *)&mclist->dmi_addr[2]);
10383 config->config_table[i].
10384 cam_entry.lsb_mac_addr =
10385 swab16(*(u16 *)&mclist->dmi_addr[4]);
10386 config->config_table[i].cam_entry.flags =
10387 cpu_to_le16(port);
10388 config->config_table[i].
10389 target_table_entry.flags = 0;
10390 config->config_table[i].
10391 target_table_entry.client_id = 0;
10392 config->config_table[i].
10393 target_table_entry.vlan_id = 0;
10394
10395 DP(NETIF_MSG_IFUP,
10396 "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
10397 config->config_table[i].
10398 cam_entry.msb_mac_addr,
10399 config->config_table[i].
10400 cam_entry.middle_mac_addr,
10401 config->config_table[i].
10402 cam_entry.lsb_mac_addr);
10403 }
8d9c5f34 10404 old = config->hdr.length;
34f80b04
EG
10405 if (old > i) {
10406 for (; i < old; i++) {
10407 if (CAM_IS_INVALID(config->
10408 config_table[i])) {
af246401 10409 /* already invalidated */
34f80b04
EG
10410 break;
10411 }
10412 /* invalidate */
10413 CAM_INVALIDATE(config->
10414 config_table[i]);
10415 }
10416 }
10417
10418 if (CHIP_REV_IS_SLOW(bp))
10419 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
10420 else
10421 offset = BNX2X_MAX_MULTICAST*(1 + port);
10422
8d9c5f34 10423 config->hdr.length = i;
34f80b04 10424 config->hdr.offset = offset;
8d9c5f34 10425 config->hdr.client_id = bp->fp->cl_id;
34f80b04
EG
10426 config->hdr.reserved1 = 0;
10427
10428 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10429 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
10430 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
10431 0);
10432 } else { /* E1H */
10433 /* Accept one or more multicasts */
10434 struct dev_mc_list *mclist;
10435 u32 mc_filter[MC_HASH_SIZE];
10436 u32 crc, bit, regidx;
10437 int i;
10438
10439 memset(mc_filter, 0, 4 * MC_HASH_SIZE);
10440
10441 for (i = 0, mclist = dev->mc_list;
10442 mclist && (i < dev->mc_count);
10443 i++, mclist = mclist->next) {
10444
7c510e4b
JB
10445 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
10446 mclist->dmi_addr);
34f80b04
EG
10447
10448 crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
10449 bit = (crc >> 24) & 0xff;
10450 regidx = bit >> 5;
10451 bit &= 0x1f;
10452 mc_filter[regidx] |= (1 << bit);
10453 }
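			/* the top byte of the crc32c selects one of 256
			 * bucket bits spread across the eight 32-bit
			 * MC_HASH registers
			 */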
10454
10455 for (i = 0; i < MC_HASH_SIZE; i++)
10456 REG_WR(bp, MC_HASH_OFFSET(bp, i),
10457 mc_filter[i]);
10458 }
10459 }
10460
10461 bp->rx_mode = rx_mode;
10462 bnx2x_set_storm_rx_mode(bp);
10463}
10464
10465/* called with rtnl_lock */
a2fbb9ea
ET
10466static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
10467{
10468 struct sockaddr *addr = p;
10469 struct bnx2x *bp = netdev_priv(dev);
10470
34f80b04 10471 if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
a2fbb9ea
ET
10472 return -EINVAL;
10473
10474 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
34f80b04
EG
10475 if (netif_running(dev)) {
10476 if (CHIP_IS_E1(bp))
3101c2bc 10477 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 10478 else
3101c2bc 10479 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04 10480 }
a2fbb9ea
ET
10481
10482 return 0;
10483}
10484
c18487ee 10485/* called with rtnl_lock */
a2fbb9ea
ET
10486static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10487{
10488 struct mii_ioctl_data *data = if_mii(ifr);
10489 struct bnx2x *bp = netdev_priv(dev);
3196a88a 10490 int port = BP_PORT(bp);
a2fbb9ea
ET
10491 int err;
10492
10493 switch (cmd) {
10494 case SIOCGMIIPHY:
34f80b04 10495 data->phy_id = bp->port.phy_addr;
a2fbb9ea 10496
c14423fe 10497 /* fallthrough */
c18487ee 10498
a2fbb9ea 10499 case SIOCGMIIREG: {
c18487ee 10500 u16 mii_regval;
a2fbb9ea 10501
c18487ee
YR
10502 if (!netif_running(dev))
10503 return -EAGAIN;
a2fbb9ea 10504
34f80b04 10505 mutex_lock(&bp->port.phy_mutex);
3196a88a 10506 err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10507 DEFAULT_PHY_DEV_ADDR,
10508 (data->reg_num & 0x1f), &mii_regval);
10509 data->val_out = mii_regval;
34f80b04 10510 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10511 return err;
10512 }
10513
10514 case SIOCSMIIREG:
10515 if (!capable(CAP_NET_ADMIN))
10516 return -EPERM;
10517
c18487ee
YR
10518 if (!netif_running(dev))
10519 return -EAGAIN;
10520
34f80b04 10521 mutex_lock(&bp->port.phy_mutex);
3196a88a 10522 err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
c18487ee
YR
10523 DEFAULT_PHY_DEV_ADDR,
10524 (data->reg_num & 0x1f), data->val_in);
34f80b04 10525 mutex_unlock(&bp->port.phy_mutex);
a2fbb9ea
ET
10526 return err;
10527
10528 default:
10529 /* do nothing */
10530 break;
10531 }
10532
10533 return -EOPNOTSUPP;
10534}
10535
34f80b04 10536/* called with rtnl_lock */
a2fbb9ea
ET
10537static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
10538{
10539 struct bnx2x *bp = netdev_priv(dev);
34f80b04 10540 int rc = 0;
a2fbb9ea
ET
10541
10542 if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
10543 ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
10544 return -EINVAL;
10545
10546 /* This does not race with packet allocation
c14423fe 10547 * because the actual alloc size is
a2fbb9ea
ET
10548 * only updated as part of load
10549 */
10550 dev->mtu = new_mtu;
10551
10552 if (netif_running(dev)) {
34f80b04
EG
10553 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10554 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
a2fbb9ea 10555 }
34f80b04
EG
10556
10557 return rc;
a2fbb9ea
ET
10558}
10559
10560static void bnx2x_tx_timeout(struct net_device *dev)
10561{
10562 struct bnx2x *bp = netdev_priv(dev);
10563
10564#ifdef BNX2X_STOP_ON_ERROR
10565 if (!bp->panic)
10566 bnx2x_panic();
10567#endif
10568 /* This allows the netif to be shut down gracefully before resetting */
10569 schedule_work(&bp->reset_task);
10570}
10571
10572#ifdef BCM_VLAN
34f80b04 10573/* called with rtnl_lock */
a2fbb9ea
ET
10574static void bnx2x_vlan_rx_register(struct net_device *dev,
10575 struct vlan_group *vlgrp)
10576{
10577 struct bnx2x *bp = netdev_priv(dev);
10578
10579 bp->vlgrp = vlgrp;
0c6671b0
EG
10580
10581 /* Set flags according to the required capabilities */
10582 bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
10583
10584 if (dev->features & NETIF_F_HW_VLAN_TX)
10585 bp->flags |= HW_VLAN_TX_FLAG;
10586
10587 if (dev->features & NETIF_F_HW_VLAN_RX)
10588 bp->flags |= HW_VLAN_RX_FLAG;
10589
a2fbb9ea 10590 if (netif_running(dev))
49d66772 10591 bnx2x_set_client_config(bp);
a2fbb9ea 10592}
34f80b04 10593
a2fbb9ea
ET
10594#endif
10595
10596#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10597static void poll_bnx2x(struct net_device *dev)
10598{
10599 struct bnx2x *bp = netdev_priv(dev);
10600
10601 disable_irq(bp->pdev->irq);
10602 bnx2x_interrupt(bp->pdev->irq, dev);
10603 enable_irq(bp->pdev->irq);
10604}
10605#endif
10606
c64213cd
SH
10607static const struct net_device_ops bnx2x_netdev_ops = {
10608 .ndo_open = bnx2x_open,
10609 .ndo_stop = bnx2x_close,
10610 .ndo_start_xmit = bnx2x_start_xmit,
10611 .ndo_set_multicast_list = bnx2x_set_rx_mode,
10612 .ndo_set_mac_address = bnx2x_change_mac_addr,
10613 .ndo_validate_addr = eth_validate_addr,
10614 .ndo_do_ioctl = bnx2x_ioctl,
10615 .ndo_change_mtu = bnx2x_change_mtu,
10616 .ndo_tx_timeout = bnx2x_tx_timeout,
10617#ifdef BCM_VLAN
10618 .ndo_vlan_rx_register = bnx2x_vlan_rx_register,
10619#endif
10620#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
10621 .ndo_poll_controller = poll_bnx2x,
10622#endif
10623};
10624
10625
34f80b04
EG
10626static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
10627 struct net_device *dev)
a2fbb9ea
ET
10628{
10629 struct bnx2x *bp;
10630 int rc;
10631
10632 SET_NETDEV_DEV(dev, &pdev->dev);
10633 bp = netdev_priv(dev);
10634
34f80b04
EG
10635 bp->dev = dev;
10636 bp->pdev = pdev;
a2fbb9ea 10637 bp->flags = 0;
34f80b04 10638 bp->func = PCI_FUNC(pdev->devfn);
a2fbb9ea
ET
10639
10640 rc = pci_enable_device(pdev);
10641 if (rc) {
10642 printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
10643 goto err_out;
10644 }
10645
10646 if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
10647 printk(KERN_ERR PFX "Cannot find PCI device base address,"
10648 " aborting\n");
10649 rc = -ENODEV;
10650 goto err_out_disable;
10651 }
10652
10653 if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
10654 printk(KERN_ERR PFX "Cannot find second PCI device"
10655 " base address, aborting\n");
10656 rc = -ENODEV;
10657 goto err_out_disable;
10658 }
10659
34f80b04
EG
10660 if (atomic_read(&pdev->enable_cnt) == 1) {
10661 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
10662 if (rc) {
10663 printk(KERN_ERR PFX "Cannot obtain PCI resources,"
10664 " aborting\n");
10665 goto err_out_disable;
10666 }
a2fbb9ea 10667
34f80b04
EG
10668 pci_set_master(pdev);
10669 pci_save_state(pdev);
10670 }
a2fbb9ea
ET
10671
10672 bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
10673 if (bp->pm_cap == 0) {
10674 printk(KERN_ERR PFX "Cannot find power management"
10675 " capability, aborting\n");
10676 rc = -EIO;
10677 goto err_out_release;
10678 }
10679
10680 bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
10681 if (bp->pcie_cap == 0) {
10682 printk(KERN_ERR PFX "Cannot find PCI Express capability,"
10683 " aborting\n");
10684 rc = -EIO;
10685 goto err_out_release;
10686 }
10687
10688 if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
10689 bp->flags |= USING_DAC_FLAG;
10690 if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
10691 printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
10692 " failed, aborting\n");
10693 rc = -EIO;
10694 goto err_out_release;
10695 }
10696
10697 } else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
10698 printk(KERN_ERR PFX "System does not support DMA,"
10699 " aborting\n");
10700 rc = -EIO;
10701 goto err_out_release;
10702 }
10703
34f80b04
EG
10704 dev->mem_start = pci_resource_start(pdev, 0);
10705 dev->base_addr = dev->mem_start;
10706 dev->mem_end = pci_resource_end(pdev, 0);
a2fbb9ea
ET
10707
10708 dev->irq = pdev->irq;
10709
275f165f 10710 bp->regview = pci_ioremap_bar(pdev, 0);
a2fbb9ea
ET
10711 if (!bp->regview) {
10712 printk(KERN_ERR PFX "Cannot map register space, aborting\n");
10713 rc = -ENOMEM;
10714 goto err_out_release;
10715 }
10716
34f80b04
EG
10717 bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
10718 min_t(u64, BNX2X_DB_SIZE,
10719 pci_resource_len(pdev, 2)));
a2fbb9ea
ET
10720 if (!bp->doorbells) {
10721 printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
10722 rc = -ENOMEM;
10723 goto err_out_unmap;
10724 }
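	/* BAR 0 is the register window, BAR 2 the doorbell space; only
	 * the first BNX2X_DB_SIZE bytes of the doorbell BAR are mapped
	 */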
10725
10726 bnx2x_set_power_state(bp, PCI_D0);
10727
34f80b04
EG
10728 /* clean indirect addresses */
10729 pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
10730 PCICFG_VENDOR_ID_OFFSET);
10731 REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
10732 REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
10733 REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
10734 REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
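	/* zeroing the per-port PXP2 PGL address registers presumably
	 * clears stale indirect-address mappings left by a previous
	 * driver instance (assumption; see "clean indirect addresses")
	 */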
a2fbb9ea 10735
34f80b04 10736 dev->watchdog_timeo = TX_TIMEOUT;
a2fbb9ea 10737
c64213cd 10738 dev->netdev_ops = &bnx2x_netdev_ops;
34f80b04 10739 dev->ethtool_ops = &bnx2x_ethtool_ops;
34f80b04
EG
10740 dev->features |= NETIF_F_SG;
10741 dev->features |= NETIF_F_HW_CSUM;
10742 if (bp->flags & USING_DAC_FLAG)
10743 dev->features |= NETIF_F_HIGHDMA;
10744#ifdef BCM_VLAN
10745 dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
0c6671b0 10746 bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
34f80b04
EG
10747#endif
10748 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
755735eb 10749 dev->features |= NETIF_F_TSO6;
a2fbb9ea
ET
10750
10751 return 0;
10752
10753err_out_unmap:
10754 if (bp->regview) {
10755 iounmap(bp->regview);
10756 bp->regview = NULL;
10757 }
a2fbb9ea
ET
10758 if (bp->doorbells) {
10759 iounmap(bp->doorbells);
10760 bp->doorbells = NULL;
10761 }
10762
10763err_out_release:
34f80b04
EG
10764 if (atomic_read(&pdev->enable_cnt) == 1)
10765 pci_release_regions(pdev);
a2fbb9ea
ET
10766
10767err_out_disable:
10768 pci_disable_device(pdev);
10769 pci_set_drvdata(pdev, NULL);
10770
10771err_out:
10772 return rc;
10773}
10774
25047950
ET
10775static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
10776{
10777 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10778
10779 val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
10780 return val;
10781}
10782
10783/* return value of 1=2.5GHz 2=5GHz */
10784static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
10785{
10786 u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
10787
10788 val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
10789 return val;
10790}
10791
a2fbb9ea
ET
10792static int __devinit bnx2x_init_one(struct pci_dev *pdev,
10793 const struct pci_device_id *ent)
10794{
10795 static int version_printed;
10796 struct net_device *dev = NULL;
10797 struct bnx2x *bp;
25047950 10798 int rc;
a2fbb9ea
ET
10799
10800 if (version_printed++ == 0)
10801 printk(KERN_INFO "%s", version);
10802
10803 /* dev zeroed in alloc_etherdev_mq */
555f6c78 10804 dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
34f80b04
EG
10805 if (!dev) {
10806 printk(KERN_ERR PFX "Cannot allocate net device\n");
a2fbb9ea 10807 return -ENOMEM;
34f80b04 10808 }
a2fbb9ea 10809
a2fbb9ea
ET
10810 bp = netdev_priv(dev);
10811 bp->msglevel = debug;
10812
34f80b04 10813 rc = bnx2x_init_dev(pdev, dev);
a2fbb9ea
ET
10814 if (rc < 0) {
10815 free_netdev(dev);
10816 return rc;
10817 }
10818
a2fbb9ea
ET
10819 pci_set_drvdata(pdev, dev);
10820
34f80b04 10821 rc = bnx2x_init_bp(bp);
693fc0d1
EG
10822 if (rc)
10823 goto init_one_exit;
10824
10825 rc = register_netdev(dev);
34f80b04 10826 if (rc) {
693fc0d1 10827 dev_err(&pdev->dev, "Cannot register net device\n");
34f80b04
EG
10828 goto init_one_exit;
10829 }
10830
10831 bp->common.name = board_info[ent->driver_data].name;
25047950 10832 printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
34f80b04
EG
10833 " IRQ %d, ", dev->name, bp->common.name,
10834 (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
25047950
ET
10835 bnx2x_get_pcie_width(bp),
10836 (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
10837 dev->base_addr, bp->pdev->irq);
e174961c 10838 printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
a2fbb9ea 10839 return 0;
34f80b04
EG
10840
10841init_one_exit:
10842 if (bp->regview)
10843 iounmap(bp->regview);
10844
10845 if (bp->doorbells)
10846 iounmap(bp->doorbells);
10847
10848 free_netdev(dev);
10849
10850 if (atomic_read(&pdev->enable_cnt) == 1)
10851 pci_release_regions(pdev);
10852
10853 pci_disable_device(pdev);
10854 pci_set_drvdata(pdev, NULL);
10855
10856 return rc;
a2fbb9ea
ET
10857}
10858
10859static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
10860{
10861 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10862 struct bnx2x *bp;
10863
10864 if (!dev) {
228241eb
ET
10865 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10866 return;
10867 }
228241eb 10868 bp = netdev_priv(dev);
a2fbb9ea 10869
a2fbb9ea
ET
10870 unregister_netdev(dev);
10871
10872 if (bp->regview)
10873 iounmap(bp->regview);
10874
10875 if (bp->doorbells)
10876 iounmap(bp->doorbells);
10877
10878 free_netdev(dev);
34f80b04
EG
10879
10880 if (atomic_read(&pdev->enable_cnt) == 1)
10881 pci_release_regions(pdev);
10882
a2fbb9ea
ET
10883 pci_disable_device(pdev);
10884 pci_set_drvdata(pdev, NULL);
10885}
10886
10887static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
10888{
10889 struct net_device *dev = pci_get_drvdata(pdev);
228241eb
ET
10890 struct bnx2x *bp;
10891
34f80b04
EG
10892 if (!dev) {
10893 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10894 return -ENODEV;
10895 }
10896 bp = netdev_priv(dev);
a2fbb9ea 10897
34f80b04 10898 rtnl_lock();
a2fbb9ea 10899
34f80b04 10900 pci_save_state(pdev);
228241eb 10901
34f80b04
EG
10902 if (!netif_running(dev)) {
10903 rtnl_unlock();
10904 return 0;
10905 }
a2fbb9ea
ET
10906
10907 netif_device_detach(dev);
a2fbb9ea 10908
da5a662a 10909 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
34f80b04 10910
a2fbb9ea 10911 bnx2x_set_power_state(bp, pci_choose_state(pdev, state));
228241eb 10912
34f80b04
EG
10913 rtnl_unlock();
10914
a2fbb9ea
ET
10915 return 0;
10916}
10917
10918static int bnx2x_resume(struct pci_dev *pdev)
10919{
10920 struct net_device *dev = pci_get_drvdata(pdev);
228241eb 10921 struct bnx2x *bp;
a2fbb9ea
ET
10922 int rc;
10923
228241eb
ET
10924 if (!dev) {
10925 printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
10926 return -ENODEV;
10927 }
228241eb 10928 bp = netdev_priv(dev);
a2fbb9ea 10929
34f80b04
EG
10930 rtnl_lock();
10931
228241eb 10932 pci_restore_state(pdev);
34f80b04
EG
10933
10934 if (!netif_running(dev)) {
10935 rtnl_unlock();
10936 return 0;
10937 }
10938
a2fbb9ea
ET
10939 bnx2x_set_power_state(bp, PCI_D0);
10940 netif_device_attach(dev);
10941
da5a662a 10942 rc = bnx2x_nic_load(bp, LOAD_OPEN);
a2fbb9ea 10943
34f80b04
EG
10944 rtnl_unlock();
10945
10946 return rc;
a2fbb9ea
ET
10947}
10948
f8ef6e44
YG
10949static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
10950{
10951 int i;
10952
10953 bp->state = BNX2X_STATE_ERROR;
10954
10955 bp->rx_mode = BNX2X_RX_MODE_NONE;
10956
10957 bnx2x_netif_stop(bp, 0);
10958
10959 del_timer_sync(&bp->timer);
10960 bp->stats_state = STATS_STATE_DISABLED;
10961 DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
10962
10963 /* Release IRQs */
10964 bnx2x_free_irq(bp);
10965
10966 if (CHIP_IS_E1(bp)) {
10967 struct mac_configuration_cmd *config =
10968 bnx2x_sp(bp, mcast_config);
10969
8d9c5f34 10970 for (i = 0; i < config->hdr.length; i++)
f8ef6e44
YG
10971 CAM_INVALIDATE(config->config_table[i]);
10972 }
10973
10974 /* Free SKBs, SGEs, TPA pool and driver internals */
10975 bnx2x_free_skbs(bp);
555f6c78 10976 for_each_rx_queue(bp, i)
f8ef6e44 10977 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 10978 for_each_rx_queue(bp, i)
7cde1c8b 10979 netif_napi_del(&bnx2x_fp(bp, i, napi));
f8ef6e44
YG
10980 bnx2x_free_mem(bp);
10981
10982 bp->state = BNX2X_STATE_CLOSED;
10983
10984 netif_carrier_off(bp->dev);
10985
10986 return 0;
10987}
10988
10989static void bnx2x_eeh_recover(struct bnx2x *bp)
10990{
10991 u32 val;
10992
10993 mutex_init(&bp->port.phy_mutex);
10994
10995 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
10996 bp->link_params.shmem_base = bp->common.shmem_base;
10997 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
10998
10999 if (!bp->common.shmem_base ||
11000 (bp->common.shmem_base < 0xA0000) ||
11001 (bp->common.shmem_base >= 0xC0000)) {
11002 BNX2X_DEV_INFO("MCP not active\n");
11003 bp->flags |= NO_MCP_FLAG;
11004 return;
11005 }
11006
11007 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
11008 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11009 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
11010 BNX2X_ERR("BAD MCP validity signature\n");
11011
11012 if (!BP_NOMCP(bp)) {
11013 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
11014 & DRV_MSG_SEQ_NUMBER_MASK);
11015 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
11016 }
11017}
11018
493adb1f
WX
11019/**
11020 * bnx2x_io_error_detected - called when PCI error is detected
11021 * @pdev: Pointer to PCI device
11022 * @state: The current pci connection state
11023 *
11024 * This function is called after a PCI bus error affecting
11025 * this device has been detected.
11026 */
11027static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
11028 pci_channel_state_t state)
11029{
11030 struct net_device *dev = pci_get_drvdata(pdev);
11031 struct bnx2x *bp = netdev_priv(dev);
11032
11033 rtnl_lock();
11034
11035 netif_device_detach(dev);
11036
11037 if (netif_running(dev))
f8ef6e44 11038 bnx2x_eeh_nic_unload(bp);
493adb1f
WX
11039
11040 pci_disable_device(pdev);
11041
11042 rtnl_unlock();
11043
11044 /* Request a slot reset */
11045 return PCI_ERS_RESULT_NEED_RESET;
11046}
11047
11048/**
11049 * bnx2x_io_slot_reset - called after the PCI bus has been reset
11050 * @pdev: Pointer to PCI device
11051 *
11052 * Restart the card from scratch, as if from a cold-boot.
11053 */
11054static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
11055{
11056 struct net_device *dev = pci_get_drvdata(pdev);
11057 struct bnx2x *bp = netdev_priv(dev);
11058
11059 rtnl_lock();
11060
11061 if (pci_enable_device(pdev)) {
11062 dev_err(&pdev->dev,
11063 "Cannot re-enable PCI device after reset\n");
11064 rtnl_unlock();
11065 return PCI_ERS_RESULT_DISCONNECT;
11066 }
11067
11068 pci_set_master(pdev);
11069 pci_restore_state(pdev);
11070
11071 if (netif_running(dev))
11072 bnx2x_set_power_state(bp, PCI_D0);
11073
11074 rtnl_unlock();
11075
11076 return PCI_ERS_RESULT_RECOVERED;
11077}
11078
11079/**
11080 * bnx2x_io_resume - called when traffic can start flowing again
11081 * @pdev: Pointer to PCI device
11082 *
11083 * This callback is called when the error recovery driver tells us that
11084 * it is OK to resume normal operation.
11085 */
11086static void bnx2x_io_resume(struct pci_dev *pdev)
11087{
11088 struct net_device *dev = pci_get_drvdata(pdev);
11089 struct bnx2x *bp = netdev_priv(dev);
11090
11091 rtnl_lock();
11092
f8ef6e44
YG
11093 bnx2x_eeh_recover(bp);
11094
493adb1f 11095 if (netif_running(dev))
f8ef6e44 11096 bnx2x_nic_load(bp, LOAD_NORMAL);
493adb1f
WX
11097
11098 netif_device_attach(dev);
11099
11100 rtnl_unlock();
11101}
11102
11103static struct pci_error_handlers bnx2x_err_handler = {
11104 .error_detected = bnx2x_io_error_detected,
11105 .slot_reset = bnx2x_io_slot_reset,
11106 .resume = bnx2x_io_resume,
11107};
11108
a2fbb9ea 11109static struct pci_driver bnx2x_pci_driver = {
493adb1f
WX
11110 .name = DRV_MODULE_NAME,
11111 .id_table = bnx2x_pci_tbl,
11112 .probe = bnx2x_init_one,
11113 .remove = __devexit_p(bnx2x_remove_one),
11114 .suspend = bnx2x_suspend,
11115 .resume = bnx2x_resume,
11116 .err_handler = &bnx2x_err_handler,
a2fbb9ea
ET
11117};
11118
11119static int __init bnx2x_init(void)
11120{
1cf167f2
EG
11121 bnx2x_wq = create_singlethread_workqueue("bnx2x");
11122 if (bnx2x_wq == NULL) {
11123 printk(KERN_ERR PFX "Cannot create workqueue\n");
11124 return -ENOMEM;
11125 }
11126
a2fbb9ea
ET
11127 return pci_register_driver(&bnx2x_pci_driver);
11128}
11129
11130static void __exit bnx2x_cleanup(void)
11131{
11132 pci_unregister_driver(&bnx2x_pci_driver);
1cf167f2
EG
11133
11134 destroy_workqueue(bnx2x_wq);
a2fbb9ea
ET
11135}
11136
11137module_init(bnx2x_init);
11138module_exit(bnx2x_cleanup);
11139