/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

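/*
 * A note on the two helpers above: they reach chip (GRC) registers through
 * the PCI config-space window (PCICFG_GRC_ADDRESS/PCICFG_GRC_DATA) rather
 * than the memory-mapped BAR, which is why they still work before the
 * normal register mapping is usable.  Restoring PCICFG_VENDOR_ID_OFFSET
 * afterwards parks the window on a harmless read-only offset.
 */
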
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

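/*
 * How the DMAE helpers above fit together: a host<->GRC copy is described
 * by a struct dmae_command, written by bnx2x_post_dmae() into the engine's
 * command memory and started via one of the 16 per-channel GO registers
 * (dmae_reg_go_c[]).  Completion is detected by polling wb_comp, a word
 * the engine DMAs back when the copy is done.  Until bp->dmae_ready is set
 * during init, both helpers fall back to slow indirect register access.
 */
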
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

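/*
 * XSTORM/TSTORM/CSTORM/USTORM above are the chip's four firmware-driven
 * protocol engines ("storms"); each keeps an assert list in its internal
 * memory, which is read here through the BAR_*STRORM_INTMEM windows until
 * an entry with the invalid-assert opcode marks the end of the list.
 */
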
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig attention */
				val |= 0x0100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

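/*
 * Worked example for bnx2x_tx_avail(): with prod = 5 and cons = 65535 on
 * a 16-bit ring, SUB_S16(prod, cons) = 6, so the in-flight BD count stays
 * correct across index wrap-around.  The NUM_TX_RINGS "next-page" BDs can
 * never carry data, so they are treated as permanently used, and the
 * return value is the number of BDs start_xmit() may still consume.
 */
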
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

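/*
 * "Ramrods" are slow-path commands posted to the chip firmware; their
 * completions come back as slow-path CQEs on the RX completion ring.
 * bnx2x_sp_event() above dispatches on (command | state) to advance
 * either the per-queue (fp->state) or the global (bp->state) state
 * machine, and the mb() makes the change visible to bnx2x_wait_ramrod().
 */
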
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       bp->rx_offset + RX_COPY_THRESH,
				       PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

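/*
 * SGE mask bookkeeping used above: one bit per SGE ring entry, where a set
 * bit means "usable by hardware".  Completions clear the bits of the pages
 * they consumed; bnx2x_update_sge_prod() then advances rx_sge_prod only
 * over fully-consumed 64-bit mask words, re-filling each word to all ones
 * as it goes, so the producer never overtakes a page still in use.
 */
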
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that the "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

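/*
 * The two functions above implement TPA (the hardware LRO mode; see the
 * disable_tpa module parameter): the chip aggregates in-order TCP segments
 * of one flow into a "bin".  tpa_start parks the bin's header skb in
 * tpa_pool, the payload lands in SGE pages, and tpa_stop rebuilds one
 * large skb from header plus pages, re-computes the IP checksum and hands
 * the result to the stack.
 */
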
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64.  The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
							  len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

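/*
 * bnx2x_rx_int() above is the NAPI poll body for one fastpath ring: it
 * walks the RX completion queue for up to `budget` packets.  Each CQE is
 * either a slow-path event (bnx2x_sp_event), a TPA start/stop marker, or
 * a completed packet; small packets are copied into a fresh skb so the
 * original DMA buffer can be recycled in place via bnx2x_reuse_rx_skb().
 */
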
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

4a37fb66 1738static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1739{
1740 u32 lock_status;
1741 u32 resource_bit = (1 << resource);
4a37fb66
YG
1742 int func = BP_FUNC(bp);
1743 u32 hw_lock_control_reg;
c18487ee 1744 int cnt;
a2fbb9ea 1745
c18487ee
YR
1746 /* Validating that the resource is within range */
1747 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1748 DP(NETIF_MSG_HW,
1749 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1750 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1751 return -EINVAL;
1752 }
a2fbb9ea 1753
4a37fb66
YG
1754 if (func <= 5) {
1755 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1756 } else {
1757 hw_lock_control_reg =
1758 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1759 }
1760
c18487ee 1761 /* Validating that the resource is not already taken */
4a37fb66 1762 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1763 if (lock_status & resource_bit) {
1764 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1765 lock_status, resource_bit);
1766 return -EEXIST;
1767 }
a2fbb9ea 1768
46230476
EG
1769 /* Try for 5 seconds, polling every 5 ms */

1770 for (cnt = 0; cnt < 1000; cnt++) {
c18487ee 1771 /* Try to acquire the lock */
4a37fb66
YG
1772 REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
1773 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1774 if (lock_status & resource_bit)
1775 return 0;
a2fbb9ea 1776
c18487ee 1777 msleep(5);
a2fbb9ea 1778 }
c18487ee
YR
1779 DP(NETIF_MSG_HW, "Timeout\n");
1780 return -EAGAIN;
1781}
a2fbb9ea 1782
4a37fb66 1783static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
c18487ee
YR
1784{
1785 u32 lock_status;
1786 u32 resource_bit = (1 << resource);
4a37fb66
YG
1787 int func = BP_FUNC(bp);
1788 u32 hw_lock_control_reg;
a2fbb9ea 1789
c18487ee
YR
1790 /* Validating that the resource is within range */
1791 if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
1792 DP(NETIF_MSG_HW,
1793 "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
1794 resource, HW_LOCK_MAX_RESOURCE_VALUE);
1795 return -EINVAL;
1796 }
1797
4a37fb66
YG
1798 if (func <= 5) {
1799 hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
1800 } else {
1801 hw_lock_control_reg =
1802 (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
1803 }
1804
c18487ee 1805 /* Validating that the resource is currently taken */
4a37fb66 1806 lock_status = REG_RD(bp, hw_lock_control_reg);
c18487ee
YR
1807 if (!(lock_status & resource_bit)) {
1808 DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
1809 lock_status, resource_bit);
1810 return -EFAULT;
a2fbb9ea
ET
1811 }
1812
4a37fb66 1813 REG_WR(bp, hw_lock_control_reg, resource_bit);
c18487ee
YR
1814 return 0;
1815}
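/*
 * Illustrative pairing of the two lock helpers above: a sketch, not
 * driver code, and the helper name is hypothetical.  Every successful
 * bnx2x_acquire_hw_lock() must be matched by bnx2x_release_hw_lock(),
 * and the acquire can fail with -EINVAL, -EEXIST or -EAGAIN, so callers
 * must check its return value.
 */
#if 0	/* example only */
static int bnx2x_example_locked_op(struct bnx2x *bp)
{
	int rc = bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	if (rc)
		return rc;	/* lock busy, already taken, or bad resource */
	/* ... access the shared resource here ... */
	return bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
}
#endif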
1816
1817/* HW Lock for shared dual port PHYs */
4a37fb66 1818static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
c18487ee
YR
1819{
1820 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1821
34f80b04 1822 mutex_lock(&bp->port.phy_mutex);
a2fbb9ea 1823
c18487ee
YR
1824 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1825 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1826 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
c18487ee 1827}
a2fbb9ea 1828
4a37fb66 1829static void bnx2x_release_phy_lock(struct bnx2x *bp)
c18487ee
YR
1830{
1831 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea 1832
c18487ee
YR
1833 if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
1834 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
4a37fb66 1835 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
a2fbb9ea 1836
34f80b04 1837 mutex_unlock(&bp->port.phy_mutex);
c18487ee 1838}
a2fbb9ea 1839
17de50b7 1840int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
c18487ee
YR
1841{
1842 /* The GPIO should be swapped if swap register is set and active */
1843 int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
17de50b7 1844 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
c18487ee
YR
1845 int gpio_shift = gpio_num +
1846 (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
1847 u32 gpio_mask = (1 << gpio_shift);
1848 u32 gpio_reg;
a2fbb9ea 1849
c18487ee
YR
1850 if (gpio_num > MISC_REGISTERS_GPIO_3) {
1851 BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
1852 return -EINVAL;
1853 }
a2fbb9ea 1854
4a37fb66 1855 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
c18487ee
YR
1856 /* read GPIO and mask out everything except the float bits */
1857 gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);
a2fbb9ea 1858
c18487ee
YR
1859 switch (mode) {
1860 case MISC_REGISTERS_GPIO_OUTPUT_LOW:
1861 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
1862 gpio_num, gpio_shift);
1863 /* clear FLOAT and set CLR */
1864 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1865 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
1866 break;
a2fbb9ea 1867
c18487ee
YR
1868 case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
1869 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
1870 gpio_num, gpio_shift);
1871 /* clear FLOAT and set SET */
1872 gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1873 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
1874 break;
a2fbb9ea 1875
17de50b7 1876 case MISC_REGISTERS_GPIO_INPUT_HI_Z:
c18487ee
YR
1877 DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
1878 gpio_num, gpio_shift);
1879 /* set FLOAT */
1880 gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
1881 break;
a2fbb9ea 1882
c18487ee
YR
1883 default:
1884 break;
a2fbb9ea
ET
1885 }
1886
c18487ee 1887 REG_WR(bp, MISC_REG_GPIO, gpio_reg);
4a37fb66 1888 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
f1410647 1889
c18487ee 1890 return 0;
a2fbb9ea
ET
1891}
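/*
 * Worked example for the shift logic above: if both NIG_REG_PORT_SWAP and
 * NIG_REG_STRAP_OVERRIDE read non-zero, a request for GPIO 2 on port 0 is
 * steered to gpio_port 1, so gpio_shift = 2 + MISC_REGISTERS_GPIO_PORT_SHIFT
 * and gpio_mask = 1 << gpio_shift.  (The numeric value of the port shift
 * comes from the register layout and is not repeated here.)
 */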
1892
c18487ee 1893static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
a2fbb9ea 1894{
c18487ee
YR
1895 u32 spio_mask = (1 << spio_num);
1896 u32 spio_reg;
a2fbb9ea 1897
c18487ee
YR
1898 if ((spio_num < MISC_REGISTERS_SPIO_4) ||
1899 (spio_num > MISC_REGISTERS_SPIO_7)) {
1900 BNX2X_ERR("Invalid SPIO %d\n", spio_num);
1901 return -EINVAL;
a2fbb9ea
ET
1902 }
1903
4a37fb66 1904 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee
YR
1905 /* read SPIO and mask out everything except the float bits */
1906 spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);
a2fbb9ea 1907
c18487ee 1908 switch (mode) {
6378c025 1909 case MISC_REGISTERS_SPIO_OUTPUT_LOW:
c18487ee
YR
1910 DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
1911 /* clear FLOAT and set CLR */
1912 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1913 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
1914 break;
a2fbb9ea 1915
6378c025 1916 case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
c18487ee
YR
1917 DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
1918 /* clear FLOAT and set SET */
1919 spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1920 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
1921 break;
a2fbb9ea 1922
c18487ee
YR
1923 case MISC_REGISTERS_SPIO_INPUT_HI_Z:
1924 DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
1925 /* set FLOAT */
1926 spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
1927 break;
a2fbb9ea 1928
c18487ee
YR
1929 default:
1930 break;
a2fbb9ea
ET
1931 }
1932
c18487ee 1933 REG_WR(bp, MISC_REG_SPIO, spio_reg);
4a37fb66 1934 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
c18487ee 1935
a2fbb9ea
ET
1936 return 0;
1937}
1938
c18487ee 1939static void bnx2x_calc_fc_adv(struct bnx2x *bp)
a2fbb9ea 1940{
ad33ea3a
EG
1941 switch (bp->link_vars.ieee_fc &
1942 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
c18487ee 1943 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
34f80b04 1944 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1945 ADVERTISED_Pause);
1946 break;
1947 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
34f80b04 1948 bp->port.advertising |= (ADVERTISED_Asym_Pause |
c18487ee
YR
1949 ADVERTISED_Pause);
1950 break;
1951 case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
34f80b04 1952 bp->port.advertising |= ADVERTISED_Asym_Pause;
c18487ee
YR
1953 break;
1954 default:
34f80b04 1955 bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
c18487ee
YR
1956 ADVERTISED_Pause);
1957 break;
1958 }
1959}
f1410647 1960
c18487ee
YR
1961static void bnx2x_link_report(struct bnx2x *bp)
1962{
1963 if (bp->link_vars.link_up) {
1964 if (bp->state == BNX2X_STATE_OPEN)
1965 netif_carrier_on(bp->dev);
1966 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
f1410647 1967
c18487ee 1968 printk("%d Mbps ", bp->link_vars.line_speed);
f1410647 1969
c18487ee
YR
1970 if (bp->link_vars.duplex == DUPLEX_FULL)
1971 printk("full duplex");
1972 else
1973 printk("half duplex");
f1410647 1974
c0700f90
DM
1975 if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
1976 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
c18487ee 1977 printk(", receive ");
c0700f90 1978 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
c18487ee
YR
1979 printk("& transmit ");
1980 } else {
1981 printk(", transmit ");
1982 }
1983 printk("flow control ON");
1984 }
1985 printk("\n");
f1410647 1986
c18487ee
YR
1987 } else { /* link_down */
1988 netif_carrier_off(bp->dev);
1989 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
f1410647 1990 }
c18487ee
YR
1991}
1992
1993static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
1994{
19680c48
EG
1995 if (!BP_NOMCP(bp)) {
1996 u8 rc;
a2fbb9ea 1997
19680c48 1998 /* Initialize link parameters structure variables */
8c99e7b0
YR
1999 /* It is recommended to turn off RX FC for jumbo frames
2000 for better performance */
2001 if (IS_E1HMF(bp))
c0700f90 2002 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
8c99e7b0 2003 else if (bp->dev->mtu > 5000)
c0700f90 2004 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
8c99e7b0 2005 else
c0700f90 2006 bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
a2fbb9ea 2007
4a37fb66 2008 bnx2x_acquire_phy_lock(bp);
19680c48 2009 rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2010 bnx2x_release_phy_lock(bp);
a2fbb9ea 2011
3c96c68b
EG
2012 bnx2x_calc_fc_adv(bp);
2013
19680c48
EG
2014 if (bp->link_vars.link_up)
2015 bnx2x_link_report(bp);
a2fbb9ea 2016
34f80b04 2017
19680c48
EG
2018 return rc;
2019 }
2020 BNX2X_ERR("Bootcode is missing - not initializing link\n");
2021 return -EINVAL;
a2fbb9ea
ET
2022}
2023
c18487ee 2024static void bnx2x_link_set(struct bnx2x *bp)
a2fbb9ea 2025{
19680c48 2026 if (!BP_NOMCP(bp)) {
4a37fb66 2027 bnx2x_acquire_phy_lock(bp);
19680c48 2028 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
4a37fb66 2029 bnx2x_release_phy_lock(bp);
a2fbb9ea 2030
19680c48
EG
2031 bnx2x_calc_fc_adv(bp);
2032 } else
2033 BNX2X_ERR("Bootcode is missing - not setting link\n");
c18487ee 2034}
a2fbb9ea 2035
c18487ee
YR
2036static void bnx2x__link_reset(struct bnx2x *bp)
2037{
19680c48 2038 if (!BP_NOMCP(bp)) {
4a37fb66 2039 bnx2x_acquire_phy_lock(bp);
19680c48 2040 bnx2x_link_reset(&bp->link_params, &bp->link_vars);
4a37fb66 2041 bnx2x_release_phy_lock(bp);
19680c48
EG
2042 } else
2043 BNX2X_ERR("Bootcode is missing - not resetting link\n");
c18487ee 2044}
a2fbb9ea 2045
c18487ee
YR
2046static u8 bnx2x_link_test(struct bnx2x *bp)
2047{
2048 u8 rc;
a2fbb9ea 2049
4a37fb66 2050 bnx2x_acquire_phy_lock(bp);
c18487ee 2051 rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
4a37fb66 2052 bnx2x_release_phy_lock(bp);
a2fbb9ea 2053
c18487ee
YR
2054 return rc;
2055}
a2fbb9ea 2056
8a1c38d1 2057static void bnx2x_init_port_minmax(struct bnx2x *bp)
34f80b04 2058{
8a1c38d1
EG
2059 u32 r_param = bp->link_vars.line_speed / 8;
2060 u32 fair_periodic_timeout_usec;
2061 u32 t_fair;
34f80b04 2062
8a1c38d1
EG
2063 memset(&(bp->cmng.rs_vars), 0,
2064 sizeof(struct rate_shaping_vars_per_port));
2065 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
34f80b04 2066
8a1c38d1
EG
2067 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2068 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
34f80b04 2069
8a1c38d1
EG
2070 /* this is the threshold below which no timer arming will occur;
2071 the 1.25 coefficient makes the threshold a little bigger
2072 than the real time, to compensate for timer inaccuracy */
2073 bp->cmng.rs_vars.rs_threshold =
34f80b04
EG
2074 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2075
8a1c38d1
EG
2076 /* resolution of fairness timer */
2077 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2078 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2079 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
34f80b04 2080
8a1c38d1
EG
2081 /* this is the threshold below which we won't arm the timer anymore */
2082 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
34f80b04 2083
8a1c38d1
EG
2084 /* we multiply by 1e3/8 to get bytes/msec.
2085 The accumulated credit must not exceed
2086 t_fair*FAIR_MEM (the algorithm's resolution) */
2087 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2088 /* since each tick is 4 usec */
2089 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
34f80b04
EG
2090}
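/*
 * Worked example for the arithmetic above (assuming
 * RS_PERIODIC_TIMEOUT_USEC == 100, as the "100 usec in SDM ticks = 25"
 * comment implies): at 10G, r_param = 10000/8 = 1250 bytes/usec, so
 * rs_threshold = 100 * 1250 * 5/4 = 156250 bytes, and t_fair =
 * T_FAIR_COEF / 10000, i.e. the 1000 usec quoted in the comment above.
 */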
2091
8a1c38d1 2092static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
34f80b04
EG
2093{
2094 struct rate_shaping_vars_per_vn m_rs_vn;
2095 struct fairness_vars_per_vn m_fair_vn;
2096 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2097 u16 vn_min_rate, vn_max_rate;
2098 int i;
2099
2100 /* If function is hidden - set min and max to zeroes */
2101 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2102 vn_min_rate = 0;
2103 vn_max_rate = 0;
2104
2105 } else {
2106 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2107 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
8a1c38d1 2108 /* If fairness is enabled (i.e. not all min rates are zero) and
34f80b04 2109 the current min rate is zero, set it to 1;
33471629 2110 this is a requirement of the algorithm. */
8a1c38d1 2111 if (bp->vn_weight_sum && (vn_min_rate == 0))
34f80b04
EG
2112 vn_min_rate = DEF_MIN_RATE;
2113 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2114 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2115 }
2116
8a1c38d1
EG
2117 DP(NETIF_MSG_IFUP,
2118 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2119 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
34f80b04
EG
2120
2121 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2122 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2123
2124 /* global vn counter - maximal Mbps for this vn */
2125 m_rs_vn.vn_counter.rate = vn_max_rate;
2126
2127 /* quota - number of bytes transmitted in this period */
2128 m_rs_vn.vn_counter.quota =
2129 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2130
8a1c38d1 2131 if (bp->vn_weight_sum) {
34f80b04
EG
2132 /* credit for each period of the fairness algorithm:
2133 number of bytes in T_FAIR (the vns share the port rate).
8a1c38d1
EG
2134 vn_weight_sum should not be larger than 10000, thus
2135 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2136 than zero */
34f80b04 2137 m_fair_vn.vn_credit_delta =
8a1c38d1
EG
2138 max((u32)(vn_min_rate * (T_FAIR_COEF /
2139 (8 * bp->vn_weight_sum))),
2140 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
34f80b04
EG
2141 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2142 m_fair_vn.vn_credit_delta);
2143 }
2144
34f80b04
EG
2145 /* Store it to internal memory */
2146 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2147 REG_WR(bp, BAR_XSTRORM_INTMEM +
2148 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2149 ((u32 *)(&m_rs_vn))[i]);
2150
2151 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2152 REG_WR(bp, BAR_XSTRORM_INTMEM +
2153 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2154 ((u32 *)(&m_fair_vn))[i]);
2155}
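/*
 * Example of the BW extraction above: a FUNC_MF_CFG_MIN_BW field of 25
 * yields vn_min_rate = 25 * 100 = 2500, which the rate shaper treats as
 * Mbps (see the "maximal Mbps for this vn" comment below); the shared MCP
 * configuration apparently stores bandwidth in units of 100 Mbps.
 */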
2156
8a1c38d1 2157
c18487ee
YR
2158/* This function is called upon link interrupt */
2159static void bnx2x_link_attn(struct bnx2x *bp)
2160{
bb2a0f7a
YG
2161 /* Make sure that we are synced with the current statistics */
2162 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2163
c18487ee 2164 bnx2x_link_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2165
bb2a0f7a
YG
2166 if (bp->link_vars.link_up) {
2167
2168 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2169 struct host_port_stats *pstats;
2170
2171 pstats = bnx2x_sp(bp, port_stats);
2172 /* reset old bmac stats */
2173 memset(&(pstats->mac_stx[0]), 0,
2174 sizeof(struct mac_stx));
2175 }
2176 if ((bp->state == BNX2X_STATE_OPEN) ||
2177 (bp->state == BNX2X_STATE_DISABLED))
2178 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2179 }
2180
c18487ee
YR
2181 /* indicate link status */
2182 bnx2x_link_report(bp);
34f80b04
EG
2183
2184 if (IS_E1HMF(bp)) {
8a1c38d1 2185 int port = BP_PORT(bp);
34f80b04 2186 int func;
8a1c38d1 2187 int vn;
34f80b04
EG
2188
2189 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2190 if (vn == BP_E1HVN(bp))
2191 continue;
2192
8a1c38d1 2193 func = ((vn << 1) | port);
34f80b04
EG
2194
2195 /* Set the attention towards other drivers
2196 on the same port */
2197 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2198 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2199 }
34f80b04 2200
8a1c38d1
EG
2201 if (bp->link_vars.link_up) {
2202 int i;
2203
2204 /* Init rate shaping and fairness contexts */
2205 bnx2x_init_port_minmax(bp);
34f80b04 2206
34f80b04 2207 for (vn = VN_0; vn < E1HVN_MAX; vn++)
8a1c38d1
EG
2208 bnx2x_init_vn_minmax(bp, 2*vn + port);
2209
2210 /* Store it to internal memory */
2211 for (i = 0;
2212 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2213 REG_WR(bp, BAR_XSTRORM_INTMEM +
2214 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2215 ((u32 *)(&bp->cmng))[i]);
2216 }
34f80b04 2217 }
c18487ee 2218}
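/*
 * Example of the func encoding used above: func = (vn << 1) | port, so on
 * port 1, VN_2 maps to func 5 and its link-sync attention is raised via
 * general attention bit LINK_SYNC_ATTENTION_BIT_FUNC_0 + 5.
 */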
a2fbb9ea 2219
c18487ee
YR
2220static void bnx2x__link_status_update(struct bnx2x *bp)
2221{
2222 if (bp->state != BNX2X_STATE_OPEN)
2223 return;
a2fbb9ea 2224
c18487ee 2225 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
a2fbb9ea 2226
bb2a0f7a
YG
2227 if (bp->link_vars.link_up)
2228 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2229 else
2230 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2231
c18487ee
YR
2232 /* indicate link status */
2233 bnx2x_link_report(bp);
a2fbb9ea 2234}
a2fbb9ea 2235
34f80b04
EG
2236static void bnx2x_pmf_update(struct bnx2x *bp)
2237{
2238 int port = BP_PORT(bp);
2239 u32 val;
2240
2241 bp->port.pmf = 1;
2242 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2243
2244 /* enable nig attention */
2245 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2246 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2247 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
bb2a0f7a
YG
2248
2249 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
34f80b04
EG
2250}
2251
c18487ee 2252/* end of Link */
a2fbb9ea
ET
2253
2254/* slow path */
2255
2256/*
2257 * General service functions
2258 */
2259
2260/* the slow path queue is odd since completions arrive on the fastpath ring */
2261static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2262 u32 data_hi, u32 data_lo, int common)
2263{
34f80b04 2264 int func = BP_FUNC(bp);
a2fbb9ea 2265
34f80b04
EG
2266 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2267 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
a2fbb9ea
ET
2268 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2269 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2270 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2271
2272#ifdef BNX2X_STOP_ON_ERROR
2273 if (unlikely(bp->panic))
2274 return -EIO;
2275#endif
2276
34f80b04 2277 spin_lock_bh(&bp->spq_lock);
a2fbb9ea
ET
2278
2279 if (!bp->spq_left) {
2280 BNX2X_ERR("BUG! SPQ ring full!\n");
34f80b04 2281 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2282 bnx2x_panic();
2283 return -EBUSY;
2284 }
f1410647 2285
a2fbb9ea
ET
2286 /* CID needs port number to be encoded in it */
2287 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2288 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2289 HW_CID(bp, cid)));
2290 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2291 if (common)
2292 bp->spq_prod_bd->hdr.type |=
2293 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2294
2295 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2296 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2297
2298 bp->spq_left--;
2299
2300 if (bp->spq_prod_bd == bp->spq_last_bd) {
2301 bp->spq_prod_bd = bp->spq;
2302 bp->spq_prod_idx = 0;
2303 DP(NETIF_MSG_TIMER, "end of spq\n");
2304
2305 } else {
2306 bp->spq_prod_bd++;
2307 bp->spq_prod_idx++;
2308 }
2309
34f80b04 2310 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
a2fbb9ea
ET
2311 bp->spq_prod_idx);
2312
34f80b04 2313 spin_unlock_bh(&bp->spq_lock);
a2fbb9ea
ET
2314 return 0;
2315}
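/*
 * For a typical caller of bnx2x_sp_post(), see bnx2x_storm_stats_post()
 * below: it packs an eth_query_ramrod_data structure into the
 * data_hi/data_lo pair and posts RAMROD_CMD_ID_ETH_STAT_QUERY on cid 0
 * with common == 0.
 */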
2316
2317/* acquire split MCP access lock register */
4a37fb66 2318static int bnx2x_acquire_alr(struct bnx2x *bp)
a2fbb9ea 2319{
a2fbb9ea 2320 u32 i, j, val;
34f80b04 2321 int rc = 0;
a2fbb9ea
ET
2322
2323 might_sleep();
2324 i = 100;
2325 for (j = 0; j < i*10; j++) {
2326 val = (1UL << 31);
2327 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2328 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2329 if (val & (1L << 31))
2330 break;
2331
2332 msleep(5);
2333 }
a2fbb9ea 2334 if (!(val & (1L << 31))) {
19680c48 2335 BNX2X_ERR("Cannot acquire MCP access lock register\n");
a2fbb9ea
ET
2336 rc = -EBUSY;
2337 }
2338
2339 return rc;
2340}
2341
4a37fb66
YG
2342/* release split MCP access lock register */
2343static void bnx2x_release_alr(struct bnx2x *bp)
a2fbb9ea
ET
2344{
2345 u32 val = 0;
2346
2347 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2348}
2349
2350static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2351{
2352 struct host_def_status_block *def_sb = bp->def_status_blk;
2353 u16 rc = 0;
2354
2355 barrier(); /* status block is written to by the chip */
a2fbb9ea
ET
2356 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2357 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2358 rc |= 1;
2359 }
2360 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2361 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2362 rc |= 2;
2363 }
2364 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2365 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2366 rc |= 4;
2367 }
2368 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2369 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2370 rc |= 8;
2371 }
2372 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2373 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
2374 rc |= 16;
2375 }
2376 return rc;
2377}
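/*
 * The return value above is a bitmask of which default-status-block
 * indices changed: bit 0 = attention bits, bit 1 = cstorm, bit 2 =
 * ustorm, bit 3 = xstorm, bit 4 = tstorm.  bnx2x_sp_task() below only
 * inspects bit 0 (HW attentions) but acks all five indices.
 */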
2378
2379/*
2380 * slow path service functions
2381 */
2382
2383static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2384{
34f80b04 2385 int port = BP_PORT(bp);
5c862848
EG
2386 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2387 COMMAND_REG_ATTN_BITS_SET);
a2fbb9ea
ET
2388 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2389 MISC_REG_AEU_MASK_ATTN_FUNC_0;
877e9aa4
ET
2390 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2391 NIG_REG_MASK_INTERRUPT_PORT0;
3fcaf2e5 2392 u32 aeu_mask;
a2fbb9ea 2393
a2fbb9ea
ET
2394 if (bp->attn_state & asserted)
2395 BNX2X_ERR("IGU ERROR\n");
2396
3fcaf2e5
EG
2397 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2398 aeu_mask = REG_RD(bp, aeu_addr);
2399
a2fbb9ea 2400 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
3fcaf2e5
EG
2401 aeu_mask, asserted);
2402 aeu_mask &= ~(asserted & 0xff);
2403 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2404
3fcaf2e5
EG
2405 REG_WR(bp, aeu_addr, aeu_mask);
2406 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea 2407
3fcaf2e5 2408 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
a2fbb9ea 2409 bp->attn_state |= asserted;
3fcaf2e5 2410 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
a2fbb9ea
ET
2411
2412 if (asserted & ATTN_HARD_WIRED_MASK) {
2413 if (asserted & ATTN_NIG_FOR_FUNC) {
a2fbb9ea 2414
a5e9a7cf
EG
2415 bnx2x_acquire_phy_lock(bp);
2416
877e9aa4
ET
2417 /* save nig interrupt mask */
2418 bp->nig_mask = REG_RD(bp, nig_int_mask_addr);
2419 REG_WR(bp, nig_int_mask_addr, 0);
a2fbb9ea 2420
c18487ee 2421 bnx2x_link_attn(bp);
a2fbb9ea
ET
2422
2423 /* handle unicore attn? */
2424 }
2425 if (asserted & ATTN_SW_TIMER_4_FUNC)
2426 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2427
2428 if (asserted & GPIO_2_FUNC)
2429 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2430
2431 if (asserted & GPIO_3_FUNC)
2432 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2433
2434 if (asserted & GPIO_4_FUNC)
2435 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2436
2437 if (port == 0) {
2438 if (asserted & ATTN_GENERAL_ATTN_1) {
2439 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2440 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2441 }
2442 if (asserted & ATTN_GENERAL_ATTN_2) {
2443 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2444 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2445 }
2446 if (asserted & ATTN_GENERAL_ATTN_3) {
2447 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2448 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2449 }
2450 } else {
2451 if (asserted & ATTN_GENERAL_ATTN_4) {
2452 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2453 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2454 }
2455 if (asserted & ATTN_GENERAL_ATTN_5) {
2456 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2457 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2458 }
2459 if (asserted & ATTN_GENERAL_ATTN_6) {
2460 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2461 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2462 }
2463 }
2464
2465 } /* if hardwired */
2466
5c862848
EG
2467 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2468 asserted, hc_addr);
2469 REG_WR(bp, hc_addr, asserted);
a2fbb9ea
ET
2470
2471 /* now set back the mask */
a5e9a7cf 2472 if (asserted & ATTN_NIG_FOR_FUNC) {
877e9aa4 2473 REG_WR(bp, nig_int_mask_addr, bp->nig_mask);
a5e9a7cf
EG
2474 bnx2x_release_phy_lock(bp);
2475 }
a2fbb9ea
ET
2476}
2477
877e9aa4 2478static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
a2fbb9ea 2479{
34f80b04 2480 int port = BP_PORT(bp);
877e9aa4
ET
2481 int reg_offset;
2482 u32 val;
2483
34f80b04
EG
2484 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2485 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
877e9aa4 2486
34f80b04 2487 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
877e9aa4
ET
2488
2489 val = REG_RD(bp, reg_offset);
2490 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2491 REG_WR(bp, reg_offset, val);
2492
2493 BNX2X_ERR("SPIO5 hw attention\n");
2494
34f80b04 2495 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 2496 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
877e9aa4
ET
2497 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
2498 /* Fan failure attention */
2499
17de50b7 2500 /* The PHY reset is controlled by GPIO 1 */
877e9aa4 2501 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
17de50b7
EG
2502 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2503 /* Low power mode is controlled by GPIO 2 */
877e9aa4 2504 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
17de50b7 2505 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
877e9aa4 2506 /* mark the failure */
c18487ee 2507 bp->link_params.ext_phy_config &=
877e9aa4 2508 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
c18487ee 2509 bp->link_params.ext_phy_config |=
877e9aa4
ET
2510 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2511 SHMEM_WR(bp,
2512 dev_info.port_hw_config[port].
2513 external_phy_config,
c18487ee 2514 bp->link_params.ext_phy_config);
877e9aa4
ET
2515 /* log the failure */
2516 printk(KERN_ERR PFX "Fan Failure on Network"
2517 " Controller %s has caused the driver to"
2518 " shutdown the card to prevent permanent"
2519 " damage. Please contact Dell Support for"
2520 " assistance\n", bp->dev->name);
2521 break;
2522
2523 default:
2524 break;
2525 }
2526 }
34f80b04
EG
2527
2528 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2529
2530 val = REG_RD(bp, reg_offset);
2531 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2532 REG_WR(bp, reg_offset, val);
2533
2534 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2535 (attn & HW_INTERRUT_ASSERT_SET_0));
2536 bnx2x_panic();
2537 }
877e9aa4
ET
2538}
2539
2540static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2541{
2542 u32 val;
2543
2544 if (attn & BNX2X_DOORQ_ASSERT) {
2545
2546 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2547 BNX2X_ERR("DB hw attention 0x%x\n", val);
2548 /* DORQ discard attention */
2549 if (val & 0x2)
2550 BNX2X_ERR("FATAL error from DORQ\n");
2551 }
34f80b04
EG
2552
2553 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2554
2555 int port = BP_PORT(bp);
2556 int reg_offset;
2557
2558 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2559 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2560
2561 val = REG_RD(bp, reg_offset);
2562 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2563 REG_WR(bp, reg_offset, val);
2564
2565 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2566 (attn & HW_INTERRUT_ASSERT_SET_1));
2567 bnx2x_panic();
2568 }
877e9aa4
ET
2569}
2570
2571static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2572{
2573 u32 val;
2574
2575 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2576
2577 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2578 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2579 /* CFC error attention */
2580 if (val & 0x2)
2581 BNX2X_ERR("FATAL error from CFC\n");
2582 }
2583
2584 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2585
2586 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2587 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2588 /* RQ_USDMDP_FIFO_OVERFLOW */
2589 if (val & 0x18000)
2590 BNX2X_ERR("FATAL error from PXP\n");
2591 }
34f80b04
EG
2592
2593 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2594
2595 int port = BP_PORT(bp);
2596 int reg_offset;
2597
2598 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2599 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2600
2601 val = REG_RD(bp, reg_offset);
2602 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2603 REG_WR(bp, reg_offset, val);
2604
2605 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2606 (attn & HW_INTERRUT_ASSERT_SET_2));
2607 bnx2x_panic();
2608 }
877e9aa4
ET
2609}
2610
2611static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2612{
34f80b04
EG
2613 u32 val;
2614
877e9aa4
ET
2615 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2616
34f80b04
EG
2617 if (attn & BNX2X_PMF_LINK_ASSERT) {
2618 int func = BP_FUNC(bp);
2619
2620 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2621 bnx2x__link_status_update(bp);
2622 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2623 DRV_STATUS_PMF)
2624 bnx2x_pmf_update(bp);
2625
2626 } else if (attn & BNX2X_MC_ASSERT_BITS) {
877e9aa4
ET
2627
2628 BNX2X_ERR("MC assert!\n");
2629 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2630 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2631 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2632 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2633 bnx2x_panic();
2634
2635 } else if (attn & BNX2X_MCP_ASSERT) {
2636
2637 BNX2X_ERR("MCP assert!\n");
2638 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
34f80b04 2639 bnx2x_fw_dump(bp);
877e9aa4
ET
2640
2641 } else
2642 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2643 }
2644
2645 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
34f80b04
EG
2646 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2647 if (attn & BNX2X_GRC_TIMEOUT) {
2648 val = CHIP_IS_E1H(bp) ?
2649 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2650 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2651 }
2652 if (attn & BNX2X_GRC_RSV) {
2653 val = CHIP_IS_E1H(bp) ?
2654 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2655 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2656 }
877e9aa4 2657 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
877e9aa4
ET
2658 }
2659}
2660
2661static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2662{
a2fbb9ea
ET
2663 struct attn_route attn;
2664 struct attn_route group_mask;
34f80b04 2665 int port = BP_PORT(bp);
877e9aa4 2666 int index;
a2fbb9ea
ET
2667 u32 reg_addr;
2668 u32 val;
3fcaf2e5 2669 u32 aeu_mask;
a2fbb9ea
ET
2670
2671 /* need to take the HW lock because the MCP or the other port
2672 might also try to handle this event */
4a37fb66 2673 bnx2x_acquire_alr(bp);
a2fbb9ea
ET
2674
2675 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2676 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2677 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2678 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
34f80b04
EG
2679 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2680 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
a2fbb9ea
ET
2681
2682 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2683 if (deasserted & (1 << index)) {
2684 group_mask = bp->attn_group[index];
2685
34f80b04
EG
2686 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2687 index, group_mask.sig[0], group_mask.sig[1],
2688 group_mask.sig[2], group_mask.sig[3]);
a2fbb9ea 2689
877e9aa4
ET
2690 bnx2x_attn_int_deasserted3(bp,
2691 attn.sig[3] & group_mask.sig[3]);
2692 bnx2x_attn_int_deasserted1(bp,
2693 attn.sig[1] & group_mask.sig[1]);
2694 bnx2x_attn_int_deasserted2(bp,
2695 attn.sig[2] & group_mask.sig[2]);
2696 bnx2x_attn_int_deasserted0(bp,
2697 attn.sig[0] & group_mask.sig[0]);
a2fbb9ea 2698
a2fbb9ea
ET
2699 if ((attn.sig[0] & group_mask.sig[0] &
2700 HW_PRTY_ASSERT_SET_0) ||
2701 (attn.sig[1] & group_mask.sig[1] &
2702 HW_PRTY_ASSERT_SET_1) ||
2703 (attn.sig[2] & group_mask.sig[2] &
2704 HW_PRTY_ASSERT_SET_2))
6378c025 2705 BNX2X_ERR("FATAL HW block parity attention\n");
a2fbb9ea
ET
2706 }
2707 }
2708
4a37fb66 2709 bnx2x_release_alr(bp);
a2fbb9ea 2710
5c862848 2711 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
a2fbb9ea
ET
2712
2713 val = ~deasserted;
3fcaf2e5
EG
2714 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2715 val, reg_addr);
5c862848 2716 REG_WR(bp, reg_addr, val);
a2fbb9ea 2717
a2fbb9ea 2718 if (~bp->attn_state & deasserted)
3fcaf2e5 2719 BNX2X_ERR("IGU ERROR\n");
a2fbb9ea
ET
2720
2721 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2722 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2723
3fcaf2e5
EG
2724 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2725 aeu_mask = REG_RD(bp, reg_addr);
2726
2727 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2728 aeu_mask, deasserted);
2729 aeu_mask |= (deasserted & 0xff);
2730 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
a2fbb9ea 2731
3fcaf2e5
EG
2732 REG_WR(bp, reg_addr, aeu_mask);
2733 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
a2fbb9ea
ET
2734
2735 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2736 bp->attn_state &= ~deasserted;
2737 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2738}
2739
2740static void bnx2x_attn_int(struct bnx2x *bp)
2741{
2742 /* read local copy of bits */
68d59484
EG
2743 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2744 attn_bits);
2745 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2746 attn_bits_ack);
a2fbb9ea
ET
2747 u32 attn_state = bp->attn_state;
2748
2749 /* look for changed bits */
2750 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2751 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2752
2753 DP(NETIF_MSG_HW,
2754 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2755 attn_bits, attn_ack, asserted, deasserted);
2756
2757 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
34f80b04 2758 BNX2X_ERR("BAD attention state\n");
a2fbb9ea
ET
2759
2760 /* handle bits that were raised */
2761 if (asserted)
2762 bnx2x_attn_int_asserted(bp, asserted);
2763
2764 if (deasserted)
2765 bnx2x_attn_int_deasserted(bp, deasserted);
2766}
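/*
 * Worked example of the edge detection above: with attn_bits = 0x3,
 * attn_ack = 0x1 and attn_state = 0x1, asserted = 0x3 & ~0x1 & ~0x1 = 0x2
 * (a newly raised line) and deasserted = ~0x3 & 0x1 & 0x1 = 0 (nothing
 * newly cleared).
 */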
2767
2768static void bnx2x_sp_task(struct work_struct *work)
2769{
1cf167f2 2770 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
a2fbb9ea
ET
2771 u16 status;
2772
34f80b04 2773
a2fbb9ea
ET
2774 /* Return here if interrupt is disabled */
2775 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2776 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2777 return;
2778 }
2779
2780 status = bnx2x_update_dsb_idx(bp);
34f80b04
EG
2781/* if (status == 0) */
2782/* BNX2X_ERR("spurious slowpath interrupt!\n"); */
a2fbb9ea 2783
3196a88a 2784 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
a2fbb9ea 2785
877e9aa4
ET
2786 /* HW attentions */
2787 if (status & 0x1)
a2fbb9ea 2788 bnx2x_attn_int(bp);
a2fbb9ea 2789
68d59484 2790 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
a2fbb9ea
ET
2791 IGU_INT_NOP, 1);
2792 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2793 IGU_INT_NOP, 1);
2794 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2795 IGU_INT_NOP, 1);
2796 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2797 IGU_INT_NOP, 1);
2798 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2799 IGU_INT_ENABLE, 1);
877e9aa4 2800
a2fbb9ea
ET
2801}
2802
2803static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2804{
2805 struct net_device *dev = dev_instance;
2806 struct bnx2x *bp = netdev_priv(dev);
2807
2808 /* Return here if interrupt is disabled */
2809 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
3196a88a 2810 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
a2fbb9ea
ET
2811 return IRQ_HANDLED;
2812 }
2813
8d9c5f34 2814 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
a2fbb9ea
ET
2815
2816#ifdef BNX2X_STOP_ON_ERROR
2817 if (unlikely(bp->panic))
2818 return IRQ_HANDLED;
2819#endif
2820
1cf167f2 2821 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
a2fbb9ea
ET
2822
2823 return IRQ_HANDLED;
2824}
2825
2826/* end of slow path */
2827
2828/* Statistics */
2829
2830/****************************************************************************
2831* Macros
2832****************************************************************************/
2833
a2fbb9ea
ET
2834/* sum[hi:lo] += add[hi:lo] */
2835#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2836 do { \
2837 s_lo += a_lo; \
f5ba6772 2838 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
a2fbb9ea
ET
2839 } while (0)
2840
2841/* difference = minuend - subtrahend */
2842#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
2843 do { \
bb2a0f7a
YG
2844 if (m_lo < s_lo) { \
2845 /* underflow */ \
a2fbb9ea 2846 d_hi = m_hi - s_hi; \
bb2a0f7a 2847 if (d_hi > 0) { \
6378c025 2848 /* we can 'borrow' 1 */ \
a2fbb9ea
ET
2849 d_hi--; \
2850 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
bb2a0f7a 2851 } else { \
6378c025 2852 /* m_hi <= s_hi */ \
a2fbb9ea
ET
2853 d_hi = 0; \
2854 d_lo = 0; \
2855 } \
bb2a0f7a
YG
2856 } else { \
2857 /* m_lo >= s_lo */ \
a2fbb9ea 2858 if (m_hi < s_hi) { \
bb2a0f7a
YG
2859 d_hi = 0; \
2860 d_lo = 0; \
2861 } else { \
6378c025 2862 /* m_hi >= s_hi */ \
bb2a0f7a
YG
2863 d_hi = m_hi - s_hi; \
2864 d_lo = m_lo - s_lo; \
a2fbb9ea
ET
2865 } \
2866 } \
2867 } while (0)
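/*
 * A minimal sketch (hypothetical example, compiled out) of the carry and
 * borrow behaviour of the ADD_64/DIFF_64 macros above:
 */
#if 0	/* example only */
static void bnx2x_example_add_diff_64(void)
{
	u32 s_hi = 0, s_lo = 0xffffffff;
	u32 d_hi, d_lo;

	/* 0x0:0xffffffff + 0x0:0x00000001 carries: s becomes 0x1:0x00000000 */
	ADD_64(s_hi, 0, s_lo, 1);

	/* 0x1:0x00000000 - 0x0:0x00000001 borrows: d becomes 0x0:0xffffffff */
	DIFF_64(d_hi, s_hi, 0, d_lo, s_lo, 1);
}
#endif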
2868
bb2a0f7a 2869#define UPDATE_STAT64(s, t) \
a2fbb9ea 2870 do { \
bb2a0f7a
YG
2871 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
2872 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
2873 pstats->mac_stx[0].t##_hi = new->s##_hi; \
2874 pstats->mac_stx[0].t##_lo = new->s##_lo; \
2875 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
2876 pstats->mac_stx[1].t##_lo, diff.lo); \
a2fbb9ea
ET
2877 } while (0)
2878
bb2a0f7a 2879#define UPDATE_STAT64_NIG(s, t) \
a2fbb9ea 2880 do { \
bb2a0f7a
YG
2881 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
2882 diff.lo, new->s##_lo, old->s##_lo); \
2883 ADD_64(estats->t##_hi, diff.hi, \
2884 estats->t##_lo, diff.lo); \
a2fbb9ea
ET
2885 } while (0)
2886
2887/* sum[hi:lo] += add */
2888#define ADD_EXTEND_64(s_hi, s_lo, a) \
2889 do { \
2890 s_lo += a; \
2891 s_hi += (s_lo < a) ? 1 : 0; \
2892 } while (0)
2893
bb2a0f7a 2894#define UPDATE_EXTEND_STAT(s) \
a2fbb9ea 2895 do { \
bb2a0f7a
YG
2896 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
2897 pstats->mac_stx[1].s##_lo, \
2898 new->s); \
a2fbb9ea
ET
2899 } while (0)
2900
bb2a0f7a 2901#define UPDATE_EXTEND_TSTAT(s, t) \
a2fbb9ea
ET
2902 do { \
2903 diff = le32_to_cpu(tclient->s) - old_tclient->s; \
2904 old_tclient->s = le32_to_cpu(tclient->s); \
de832a55
EG
2905 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2906 } while (0)
2907
2908#define UPDATE_EXTEND_USTAT(s, t) \
2909 do { \
2910 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2911 old_uclient->s = uclient->s; \
2912 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
bb2a0f7a
YG
2913 } while (0)
2914
2915#define UPDATE_EXTEND_XSTAT(s, t) \
2916 do { \
2917 diff = le32_to_cpu(xclient->s) - old_xclient->s; \
2918 old_xclient->s = le32_to_cpu(xclient->s); \
de832a55
EG
2919 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
2920 } while (0)
2921
2922/* minuend -= subtrahend */
2923#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
2924 do { \
2925 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
2926 } while (0)
2927
2928/* minuend[hi:lo] -= subtrahend */
2929#define SUB_EXTEND_64(m_hi, m_lo, s) \
2930 do { \
2931 SUB_64(m_hi, 0, m_lo, s); \
2932 } while (0)
2933
2934#define SUB_EXTEND_USTAT(s, t) \
2935 do { \
2936 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
2937 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
a2fbb9ea
ET
2938 } while (0)
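/*
 * Common pattern in the UPDATE_EXTEND_* macros above: the chip exports
 * 32-bit counters, so the driver keeps 64-bit software counters and adds
 * the delta (new - old) on every update; unsigned wraparound of the
 * 32-bit hardware value is then handled for free by the subtraction.
 */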
2939
2940/*
2941 * General service functions
2942 */
2943
2944static inline long bnx2x_hilo(u32 *hiref)
2945{
2946 u32 lo = *(hiref + 1);
2947#if (BITS_PER_LONG == 64)
2948 u32 hi = *hiref;
2949
2950 return HILO_U64(hi, lo);
2951#else
2952 return lo;
2953#endif
2954}
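/*
 * Behaviour of bnx2x_hilo() above: on a 64-bit kernel it returns the full
 * HILO_U64(hi, lo) value; on a 32-bit kernel the long return type can
 * only carry the low 32 bits, so the high word is dropped.
 */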
2955
2956/*
2957 * Init service functions
2958 */
2959
bb2a0f7a
YG
2960static void bnx2x_storm_stats_post(struct bnx2x *bp)
2961{
2962 if (!bp->stats_pending) {
2963 struct eth_query_ramrod_data ramrod_data = {0};
de832a55 2964 int i, rc;
bb2a0f7a
YG
2965
2966 ramrod_data.drv_counter = bp->stats_counter++;
8d9c5f34 2967 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
de832a55
EG
2968 for_each_queue(bp, i)
2969 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
bb2a0f7a
YG
2970
2971 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
2972 ((u32 *)&ramrod_data)[1],
2973 ((u32 *)&ramrod_data)[0], 0);
2974 if (rc == 0) {
2975 /* the stats ramrod has its own slot on the spq */
2976 bp->spq_left++;
2977 bp->stats_pending = 1;
2978 }
2979 }
2980}
2981
2982static void bnx2x_stats_init(struct bnx2x *bp)
2983{
2984 int port = BP_PORT(bp);
de832a55 2985 int i;
bb2a0f7a 2986
de832a55 2987 bp->stats_pending = 0;
bb2a0f7a
YG
2988 bp->executer_idx = 0;
2989 bp->stats_counter = 0;
2990
2991 /* port stats */
2992 if (!BP_NOMCP(bp))
2993 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
2994 else
2995 bp->port.port_stx = 0;
2996 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
2997
2998 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
2999 bp->port.old_nig_stats.brb_discard =
3000 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
66e855f3
YG
3001 bp->port.old_nig_stats.brb_truncate =
3002 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
bb2a0f7a
YG
3003 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3004 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3005 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3006 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3007
3008 /* function stats */
de832a55
EG
3009 for_each_queue(bp, i) {
3010 struct bnx2x_fastpath *fp = &bp->fp[i];
3011
3012 memset(&fp->old_tclient, 0,
3013 sizeof(struct tstorm_per_client_stats));
3014 memset(&fp->old_uclient, 0,
3015 sizeof(struct ustorm_per_client_stats));
3016 memset(&fp->old_xclient, 0,
3017 sizeof(struct xstorm_per_client_stats));
3018 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3019 }
3020
bb2a0f7a 3021 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
bb2a0f7a
YG
3022 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3023
3024 bp->stats_state = STATS_STATE_DISABLED;
3025 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3026 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3027}
3028
3029static void bnx2x_hw_stats_post(struct bnx2x *bp)
3030{
3031 struct dmae_command *dmae = &bp->stats_dmae;
3032 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3033
3034 *stats_comp = DMAE_COMP_VAL;
de832a55
EG
3035 if (CHIP_REV_IS_SLOW(bp))
3036 return;
bb2a0f7a
YG
3037
3038 /* loader */
3039 if (bp->executer_idx) {
3040 int loader_idx = PMF_DMAE_C(bp);
3041
3042 memset(dmae, 0, sizeof(struct dmae_command));
3043
3044 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3045 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3046 DMAE_CMD_DST_RESET |
3047#ifdef __BIG_ENDIAN
3048 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3049#else
3050 DMAE_CMD_ENDIANITY_DW_SWAP |
3051#endif
3052 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3053 DMAE_CMD_PORT_0) |
3054 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3055 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3056 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3057 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3058 sizeof(struct dmae_command) *
3059 (loader_idx + 1)) >> 2;
3060 dmae->dst_addr_hi = 0;
3061 dmae->len = sizeof(struct dmae_command) >> 2;
3062 if (CHIP_IS_E1(bp))
3063 dmae->len--;
3064 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3065 dmae->comp_addr_hi = 0;
3066 dmae->comp_val = 1;
3067
3068 *stats_comp = 0;
3069 bnx2x_post_dmae(bp, dmae, loader_idx);
3070
3071 } else if (bp->func_stx) {
3072 *stats_comp = 0;
3073 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3074 }
3075}
3076
3077static int bnx2x_stats_comp(struct bnx2x *bp)
3078{
3079 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3080 int cnt = 10;
3081
3082 might_sleep();
3083 while (*stats_comp != DMAE_COMP_VAL) {
bb2a0f7a
YG
3084 if (!cnt) {
3085 BNX2X_ERR("timeout waiting for stats to finish\n");
3086 break;
3087 }
3088 cnt--;
12469401 3089 msleep(1);
bb2a0f7a
YG
3090 }
3091 return 1;
3092}
3093
3094/*
3095 * Statistics service functions
3096 */
3097
3098static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3099{
3100 struct dmae_command *dmae;
3101 u32 opcode;
3102 int loader_idx = PMF_DMAE_C(bp);
3103 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3104
3105 /* sanity */
3106 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3107 BNX2X_ERR("BUG!\n");
3108 return;
3109 }
3110
3111 bp->executer_idx = 0;
3112
3113 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3114 DMAE_CMD_C_ENABLE |
3115 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3116#ifdef __BIG_ENDIAN
3117 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3118#else
3119 DMAE_CMD_ENDIANITY_DW_SWAP |
3120#endif
3121 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3122 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3123
3124 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3125 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3126 dmae->src_addr_lo = bp->port.port_stx >> 2;
3127 dmae->src_addr_hi = 0;
3128 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3129 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3130 dmae->len = DMAE_LEN32_RD_MAX;
3131 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3132 dmae->comp_addr_hi = 0;
3133 dmae->comp_val = 1;
3134
3135 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3136 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3137 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3138 dmae->src_addr_hi = 0;
7a9b2557
VZ
3139 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3140 DMAE_LEN32_RD_MAX * 4);
3141 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3142 DMAE_LEN32_RD_MAX * 4);
bb2a0f7a
YG
3143 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3144 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3145 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3146 dmae->comp_val = DMAE_COMP_VAL;
3147
3148 *stats_comp = 0;
3149 bnx2x_hw_stats_post(bp);
3150 bnx2x_stats_comp(bp);
3151}
3152
3153static void bnx2x_port_stats_init(struct bnx2x *bp)
a2fbb9ea
ET
3154{
3155 struct dmae_command *dmae;
34f80b04 3156 int port = BP_PORT(bp);
bb2a0f7a 3157 int vn = BP_E1HVN(bp);
a2fbb9ea 3158 u32 opcode;
bb2a0f7a 3159 int loader_idx = PMF_DMAE_C(bp);
a2fbb9ea 3160 u32 mac_addr;
bb2a0f7a
YG
3161 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3162
3163 /* sanity */
3164 if (!bp->link_vars.link_up || !bp->port.pmf) {
3165 BNX2X_ERR("BUG!\n");
3166 return;
3167 }
a2fbb9ea
ET
3168
3169 bp->executer_idx = 0;
bb2a0f7a
YG
3170
3171 /* MCP */
3172 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3173 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3174 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
a2fbb9ea 3175#ifdef __BIG_ENDIAN
bb2a0f7a 3176 DMAE_CMD_ENDIANITY_B_DW_SWAP |
a2fbb9ea 3177#else
bb2a0f7a 3178 DMAE_CMD_ENDIANITY_DW_SWAP |
a2fbb9ea 3179#endif
bb2a0f7a
YG
3180 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3181 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3182
bb2a0f7a 3183 if (bp->port.port_stx) {
a2fbb9ea
ET
3184
3185 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3186 dmae->opcode = opcode;
bb2a0f7a
YG
3187 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3188 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3189 dmae->dst_addr_lo = bp->port.port_stx >> 2;
a2fbb9ea 3190 dmae->dst_addr_hi = 0;
bb2a0f7a
YG
3191 dmae->len = sizeof(struct host_port_stats) >> 2;
3192 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3193 dmae->comp_addr_hi = 0;
3194 dmae->comp_val = 1;
a2fbb9ea
ET
3195 }
3196
bb2a0f7a
YG
3197 if (bp->func_stx) {
3198
3199 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3200 dmae->opcode = opcode;
3201 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3202 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3203 dmae->dst_addr_lo = bp->func_stx >> 2;
3204 dmae->dst_addr_hi = 0;
3205 dmae->len = sizeof(struct host_func_stats) >> 2;
3206 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3207 dmae->comp_addr_hi = 0;
3208 dmae->comp_val = 1;
a2fbb9ea
ET
3209 }
3210
bb2a0f7a 3211 /* MAC */
a2fbb9ea
ET
3212 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3213 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3214 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3215#ifdef __BIG_ENDIAN
3216 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3217#else
3218 DMAE_CMD_ENDIANITY_DW_SWAP |
3219#endif
bb2a0f7a
YG
3220 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3221 (vn << DMAE_CMD_E1HVN_SHIFT));
a2fbb9ea 3222
c18487ee 3223 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
a2fbb9ea
ET
3224
3225 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3226 NIG_REG_INGRESS_BMAC0_MEM);
3227
3228 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3229 BIGMAC_REGISTER_TX_STAT_GTBYT */
3230 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3231 dmae->opcode = opcode;
3232 dmae->src_addr_lo = (mac_addr +
3233 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3234 dmae->src_addr_hi = 0;
3235 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3236 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3237 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3238 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3239 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3240 dmae->comp_addr_hi = 0;
3241 dmae->comp_val = 1;
3242
3243 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3244 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3245 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3246 dmae->opcode = opcode;
3247 dmae->src_addr_lo = (mac_addr +
3248 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3249 dmae->src_addr_hi = 0;
3250 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3251 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea 3252 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3253 offsetof(struct bmac_stats, rx_stat_gr64_lo));
a2fbb9ea
ET
3254 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3255 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3256 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3257 dmae->comp_addr_hi = 0;
3258 dmae->comp_val = 1;
3259
c18487ee 3260 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
a2fbb9ea
ET
3261
3262 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3263
3264 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3265 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3266 dmae->opcode = opcode;
3267 dmae->src_addr_lo = (mac_addr +
3268 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3269 dmae->src_addr_hi = 0;
3270 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3271 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3272 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3273 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3274 dmae->comp_addr_hi = 0;
3275 dmae->comp_val = 1;
3276
3277 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3278 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3279 dmae->opcode = opcode;
3280 dmae->src_addr_lo = (mac_addr +
3281 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3282 dmae->src_addr_hi = 0;
3283 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3284 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea 3285 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3286 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
a2fbb9ea
ET
3287 dmae->len = 1;
3288 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3289 dmae->comp_addr_hi = 0;
3290 dmae->comp_val = 1;
3291
3292 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3293 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3294 dmae->opcode = opcode;
3295 dmae->src_addr_lo = (mac_addr +
3296 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3297 dmae->src_addr_hi = 0;
3298 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3299 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea 3300 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
bb2a0f7a 3301 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
a2fbb9ea
ET
3302 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3303 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3304 dmae->comp_addr_hi = 0;
3305 dmae->comp_val = 1;
3306 }
3307
3308 /* NIG */
bb2a0f7a
YG
3309 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3310 dmae->opcode = opcode;
3311 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3312 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3313 dmae->src_addr_hi = 0;
3314 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3315 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3316 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3317 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3318 dmae->comp_addr_hi = 0;
3319 dmae->comp_val = 1;
3320
3321 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3322 dmae->opcode = opcode;
3323 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3324 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3325 dmae->src_addr_hi = 0;
3326 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3327 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3328 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3329 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3330 dmae->len = (2*sizeof(u32)) >> 2;
3331 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3332 dmae->comp_addr_hi = 0;
3333 dmae->comp_val = 1;
3334
a2fbb9ea
ET
3335 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3336 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3337 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3338 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3339#ifdef __BIG_ENDIAN
3340 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3341#else
3342 DMAE_CMD_ENDIANITY_DW_SWAP |
3343#endif
bb2a0f7a
YG
3344 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3345 (vn << DMAE_CMD_E1HVN_SHIFT));
3346 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3347 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
a2fbb9ea 3348 dmae->src_addr_hi = 0;
bb2a0f7a
YG
3349 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3350 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3351 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3352 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3353 dmae->len = (2*sizeof(u32)) >> 2;
3354 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3355 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3356 dmae->comp_val = DMAE_COMP_VAL;
3357
3358 *stats_comp = 0;
a2fbb9ea
ET
3359}
3360
bb2a0f7a 3361static void bnx2x_func_stats_init(struct bnx2x *bp)
a2fbb9ea 3362{
bb2a0f7a
YG
3363 struct dmae_command *dmae = &bp->stats_dmae;
3364 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
a2fbb9ea 3365
bb2a0f7a
YG
3366 /* sanity */
3367 if (!bp->func_stx) {
3368 BNX2X_ERR("BUG!\n");
3369 return;
3370 }
a2fbb9ea 3371
bb2a0f7a
YG
3372 bp->executer_idx = 0;
3373 memset(dmae, 0, sizeof(struct dmae_command));
a2fbb9ea 3374
bb2a0f7a
YG
3375 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3376 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3377 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3378#ifdef __BIG_ENDIAN
3379 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3380#else
3381 DMAE_CMD_ENDIANITY_DW_SWAP |
3382#endif
3383 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3384 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3385 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3386 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3387 dmae->dst_addr_lo = bp->func_stx >> 2;
3388 dmae->dst_addr_hi = 0;
3389 dmae->len = sizeof(struct host_func_stats) >> 2;
3390 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3391 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3392 dmae->comp_val = DMAE_COMP_VAL;
a2fbb9ea 3393
bb2a0f7a
YG
3394 *stats_comp = 0;
3395}
a2fbb9ea 3396
bb2a0f7a
YG
3397static void bnx2x_stats_start(struct bnx2x *bp)
3398{
3399 if (bp->port.pmf)
3400 bnx2x_port_stats_init(bp);
3401
3402 else if (bp->func_stx)
3403 bnx2x_func_stats_init(bp);
3404
3405 bnx2x_hw_stats_post(bp);
3406 bnx2x_storm_stats_post(bp);
3407}
3408
3409static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3410{
3411 bnx2x_stats_comp(bp);
3412 bnx2x_stats_pmf_update(bp);
3413 bnx2x_stats_start(bp);
3414}
3415
3416 static void bnx2x_stats_restart(struct bnx2x *bp)
3417 {
3418 bnx2x_stats_comp(bp);
3419 bnx2x_stats_start(bp);
3420}
3421
3422 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3423 {
3424 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3425 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3426 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3427 struct regpair diff;
3428
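	/* The UPDATE_STAT64() macros (defined in bnx2x.h) fold each freshly
	 * DMAE'd BigMAC counter into its 64-bit mac_stx mirror: they take
	 * the delta against the previous hardware snapshot and add it with
	 * carry, so the narrower hardware counters keep accumulating
	 * correctly across wraparound.
	 */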
3429 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3430 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3431 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3432 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3433 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3434 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3435 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3436 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3437 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3438 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3439 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3440 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3441 UPDATE_STAT64(tx_stat_gt127,
3442 tx_stat_etherstatspkts65octetsto127octets);
3443 UPDATE_STAT64(tx_stat_gt255,
3444 tx_stat_etherstatspkts128octetsto255octets);
3445 UPDATE_STAT64(tx_stat_gt511,
3446 tx_stat_etherstatspkts256octetsto511octets);
3447 UPDATE_STAT64(tx_stat_gt1023,
3448 tx_stat_etherstatspkts512octetsto1023octets);
3449 UPDATE_STAT64(tx_stat_gt1518,
3450 tx_stat_etherstatspkts1024octetsto1522octets);
3451 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3452 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3453 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3454 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3455 UPDATE_STAT64(tx_stat_gterr,
3456 tx_stat_dot3statsinternalmactransmiterrors);
3457 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3458
3459 estats->pause_frames_received_hi =
3460 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3461 estats->pause_frames_received_lo =
3462 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3463
3464 estats->pause_frames_sent_hi =
3465 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3466 estats->pause_frames_sent_lo =
3467 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3468}
3469
3470 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3471 {
3472 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3473 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3474 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3475
3476 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3477 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3478 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3479 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3480 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3481 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3482 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3483 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3484 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3485 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3486 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3487 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3488 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3489 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3490 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3491 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3492 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3493 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3494 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3495 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3496 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3497 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3498 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3499 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3500 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3501 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3502 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3503 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3504 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3505 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3506 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3507
3508 estats->pause_frames_received_hi =
3509 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3510 estats->pause_frames_received_lo =
3511 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3512 ADD_64(estats->pause_frames_received_hi,
3513 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3514 estats->pause_frames_received_lo,
3515 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3516
3517 estats->pause_frames_sent_hi =
3518 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3519 estats->pause_frames_sent_lo =
3520 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3521 ADD_64(estats->pause_frames_sent_hi,
3522 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3523 estats->pause_frames_sent_lo,
3524 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3525}
3526
3527 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3528 {
3529 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3530 struct nig_stats *old = &(bp->port.old_nig_stats);
3531 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3532 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3533 struct regpair diff;
3534 u32 nig_timer_max;
3535
3536 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3537 bnx2x_bmac_stats_update(bp);
3538
3539 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3540 bnx2x_emac_stats_update(bp);
3541
3542 else { /* unreached */
3543 BNX2X_ERR("stats updated by dmae but no MAC active\n");
3544 return -1;
3545 }
3546 
3547 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3548 new->brb_discard - old->brb_discard);
3549 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3550 new->brb_truncate - old->brb_truncate);
3551 
3552 UPDATE_STAT64_NIG(egress_mac_pkt0,
3553 etherstatspkts1024octetsto1522octets);
3554 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3555 
3556 memcpy(old, new, sizeof(struct nig_stats));
3557 
3558 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3559 sizeof(struct mac_stx));
3560 estats->brb_drop_hi = pstats->brb_drop_hi;
3561 estats->brb_drop_lo = pstats->brb_drop_lo;
3562 
3563 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3564 
3565 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3566 if (nig_timer_max != estats->nig_timer_max) {
3567 estats->nig_timer_max = nig_timer_max;
3568 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3569 }
3570
3571 return 0;
3572}
3573
3574 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3575 {
3576 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3577 struct tstorm_per_port_stats *tport =
3578 &stats->tstorm_common.port_statistics;
3579 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3580 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3581 int i;
3582
3583 memset(&(fstats->total_bytes_received_hi), 0,
3584 sizeof(struct host_func_stats) - 2*sizeof(u32));
3585 estats->error_bytes_received_hi = 0;
3586 estats->error_bytes_received_lo = 0;
3587 estats->etherstatsoverrsizepkts_hi = 0;
3588 estats->etherstatsoverrsizepkts_lo = 0;
3589 estats->no_buff_discard_hi = 0;
3590 estats->no_buff_discard_lo = 0;
3591 
3592 for_each_queue(bp, i) {
3593 struct bnx2x_fastpath *fp = &bp->fp[i];
3594 int cl_id = fp->cl_id;
3595 struct tstorm_per_client_stats *tclient =
3596 &stats->tstorm_common.client_statistics[cl_id];
3597 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3598 struct ustorm_per_client_stats *uclient =
3599 &stats->ustorm_common.client_statistics[cl_id];
3600 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3601 struct xstorm_per_client_stats *xclient =
3602 &stats->xstorm_common.client_statistics[cl_id];
3603 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3604 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3605 u32 diff;
3606
3607 /* are storm stats valid? */
3608 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3609 bp->stats_counter) {
3610 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3611 " xstorm counter (%d) != stats_counter (%d)\n",
3612 i, xclient->stats_counter, bp->stats_counter);
3613 return -1;
3614 }
3615 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3616 bp->stats_counter) {
3617 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3618 " tstorm counter (%d) != stats_counter (%d)\n",
3619 i, tclient->stats_counter, bp->stats_counter);
3620 return -2;
3621 }
3622 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3623 bp->stats_counter) {
3624 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3625 " ustorm counter (%d) != stats_counter (%d)\n",
3626 i, uclient->stats_counter, bp->stats_counter);
3627 return -4;
3628 }
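		/* The three checks above guard against torn snapshots: each
		 * storm bumps its per-client stats_counter only after it has
		 * finished writing, so a lagging counter means the block is
		 * stale and the whole update is retried on a later tick (see
		 * the stats_pending handling in bnx2x_stats_update()).
		 */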
3629 
3630 qstats->total_bytes_received_hi =
3631 qstats->valid_bytes_received_hi =
3632 le32_to_cpu(tclient->total_rcv_bytes.hi);
3633 qstats->total_bytes_received_lo =
3634 qstats->valid_bytes_received_lo =
3635 le32_to_cpu(tclient->total_rcv_bytes.lo);
3636 
3637 qstats->error_bytes_received_hi =
3638 le32_to_cpu(tclient->rcv_error_bytes.hi);
3639 qstats->error_bytes_received_lo =
3640 le32_to_cpu(tclient->rcv_error_bytes.lo);
3641 
3642 ADD_64(qstats->total_bytes_received_hi,
3643 qstats->error_bytes_received_hi,
3644 qstats->total_bytes_received_lo,
3645 qstats->error_bytes_received_lo);
3646
3647 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3648 total_unicast_packets_received);
3649 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3650 total_multicast_packets_received);
3651 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3652 total_broadcast_packets_received);
3653 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3654 etherstatsoverrsizepkts);
3655 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3656
3657 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3658 total_unicast_packets_received);
3659 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3660 total_multicast_packets_received);
3661 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3662 total_broadcast_packets_received);
3663 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3664 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3665 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3666
3667 qstats->total_bytes_transmitted_hi =
3668 le32_to_cpu(xclient->total_sent_bytes.hi);
3669 qstats->total_bytes_transmitted_lo =
3670 le32_to_cpu(xclient->total_sent_bytes.lo);
3671
3672 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3673 total_unicast_packets_transmitted);
3674 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3675 total_multicast_packets_transmitted);
3676 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3677 total_broadcast_packets_transmitted);
3678
3679 old_tclient->checksum_discard = tclient->checksum_discard;
3680 old_tclient->ttl0_discard = tclient->ttl0_discard;
3681
3682 ADD_64(fstats->total_bytes_received_hi,
3683 qstats->total_bytes_received_hi,
3684 fstats->total_bytes_received_lo,
3685 qstats->total_bytes_received_lo);
3686 ADD_64(fstats->total_bytes_transmitted_hi,
3687 qstats->total_bytes_transmitted_hi,
3688 fstats->total_bytes_transmitted_lo,
3689 qstats->total_bytes_transmitted_lo);
3690 ADD_64(fstats->total_unicast_packets_received_hi,
3691 qstats->total_unicast_packets_received_hi,
3692 fstats->total_unicast_packets_received_lo,
3693 qstats->total_unicast_packets_received_lo);
3694 ADD_64(fstats->total_multicast_packets_received_hi,
3695 qstats->total_multicast_packets_received_hi,
3696 fstats->total_multicast_packets_received_lo,
3697 qstats->total_multicast_packets_received_lo);
3698 ADD_64(fstats->total_broadcast_packets_received_hi,
3699 qstats->total_broadcast_packets_received_hi,
3700 fstats->total_broadcast_packets_received_lo,
3701 qstats->total_broadcast_packets_received_lo);
3702 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3703 qstats->total_unicast_packets_transmitted_hi,
3704 fstats->total_unicast_packets_transmitted_lo,
3705 qstats->total_unicast_packets_transmitted_lo);
3706 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3707 qstats->total_multicast_packets_transmitted_hi,
3708 fstats->total_multicast_packets_transmitted_lo,
3709 qstats->total_multicast_packets_transmitted_lo);
3710 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3711 qstats->total_broadcast_packets_transmitted_hi,
3712 fstats->total_broadcast_packets_transmitted_lo,
3713 qstats->total_broadcast_packets_transmitted_lo);
3714 ADD_64(fstats->valid_bytes_received_hi,
3715 qstats->valid_bytes_received_hi,
3716 fstats->valid_bytes_received_lo,
3717 qstats->valid_bytes_received_lo);
3718
3719 ADD_64(estats->error_bytes_received_hi,
3720 qstats->error_bytes_received_hi,
3721 estats->error_bytes_received_lo,
3722 qstats->error_bytes_received_lo);
3723 ADD_64(estats->etherstatsoverrsizepkts_hi,
3724 qstats->etherstatsoverrsizepkts_hi,
3725 estats->etherstatsoverrsizepkts_lo,
3726 qstats->etherstatsoverrsizepkts_lo);
3727 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3728 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3729 }
3730
3731 ADD_64(fstats->total_bytes_received_hi,
3732 estats->rx_stat_ifhcinbadoctets_hi,
3733 fstats->total_bytes_received_lo,
3734 estats->rx_stat_ifhcinbadoctets_lo);
3735
3736 memcpy(estats, &(fstats->total_bytes_received_hi),
3737 sizeof(struct host_func_stats) - 2*sizeof(u32));
3738
3739 ADD_64(estats->etherstatsoverrsizepkts_hi,
3740 estats->rx_stat_dot3statsframestoolong_hi,
3741 estats->etherstatsoverrsizepkts_lo,
3742 estats->rx_stat_dot3statsframestoolong_lo);
3743 ADD_64(estats->error_bytes_received_hi,
3744 estats->rx_stat_ifhcinbadoctets_hi,
3745 estats->error_bytes_received_lo,
3746 estats->rx_stat_ifhcinbadoctets_lo);
3747
3748 if (bp->port.pmf) {
3749 estats->mac_filter_discard =
3750 le32_to_cpu(tport->mac_filter_discard);
3751 estats->xxoverflow_discard =
3752 le32_to_cpu(tport->xxoverflow_discard);
3753 estats->brb_truncate_discard =
3754 le32_to_cpu(tport->brb_truncate_discard);
3755 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3756 }
3757
3758 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3759 
3760 bp->stats_pending = 0;
3761
3762 return 0;
3763}
3764
3765 static void bnx2x_net_stats_update(struct bnx2x *bp)
3766 {
3767 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3768 struct net_device_stats *nstats = &bp->dev->stats;
3769 int i;
3770
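	/* All adapter counters are kept as {hi, lo} pairs of u32s;
	 * bnx2x_hilo() merely glues such a pair into a single u64 for the
	 * net_device_stats fields filled in below.
	 */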
3771 nstats->rx_packets =
3772 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3773 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3774 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3775
3776 nstats->tx_packets =
3777 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3778 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3779 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3780
3781 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3782 
3783 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3784 
3785 nstats->rx_dropped = estats->mac_discard;
3786 for_each_queue(bp, i)
3787 nstats->rx_dropped +=
3788 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3789
3790 nstats->tx_dropped = 0;
3791
3792 nstats->multicast =
3793 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3794 
3795 nstats->collisions =
3796 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3797
3798 nstats->rx_length_errors =
3799 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3800 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3801 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3802 bnx2x_hilo(&estats->brb_truncate_hi);
3803 nstats->rx_crc_errors =
3804 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3805 nstats->rx_frame_errors =
3806 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3807 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3808 nstats->rx_missed_errors = estats->xxoverflow_discard;
3809
3810 nstats->rx_errors = nstats->rx_length_errors +
3811 nstats->rx_over_errors +
3812 nstats->rx_crc_errors +
3813 nstats->rx_frame_errors +
3814 nstats->rx_fifo_errors +
3815 nstats->rx_missed_errors;
3816 
3817 nstats->tx_aborted_errors =
3818 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3819 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3820 nstats->tx_carrier_errors =
3821 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3822 nstats->tx_fifo_errors = 0;
3823 nstats->tx_heartbeat_errors = 0;
3824 nstats->tx_window_errors = 0;
3825
3826 nstats->tx_errors = nstats->tx_aborted_errors +
3827 nstats->tx_carrier_errors +
3828 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3829}
3830
3831 static void bnx2x_drv_stats_update(struct bnx2x *bp)
3832 {
3833 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3834 int i;
3835
3836 estats->driver_xoff = 0;
3837 estats->rx_err_discard_pkt = 0;
3838 estats->rx_skb_alloc_failed = 0;
3839 estats->hw_csum_err = 0;
3840 for_each_queue(bp, i) {
3841 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
3842
3843 estats->driver_xoff += qstats->driver_xoff;
3844 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
3845 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
3846 estats->hw_csum_err += qstats->hw_csum_err;
3847 }
3848}
3849
3850 static void bnx2x_stats_update(struct bnx2x *bp)
3851 {
3852 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3853 
3854 if (*stats_comp != DMAE_COMP_VAL)
3855 return;
3856
3857 if (bp->port.pmf)
3858 bnx2x_hw_stats_update(bp);
3859 
3860 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
3861 BNX2X_ERR("storm stats were not updated for 3 times\n");
3862 bnx2x_panic();
3863 return;
3864 }
3865
3866 bnx2x_net_stats_update(bp);
3867 bnx2x_drv_stats_update(bp);
3868
3869 if (bp->msglevel & NETIF_MSG_TIMER) {
3870 struct tstorm_per_client_stats *old_tclient =
3871 &bp->fp->old_tclient;
3872 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
3873 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3874 struct net_device_stats *nstats = &bp->dev->stats;
3875 int i;
3876
3877 printk(KERN_DEBUG "%s:\n", bp->dev->name);
3878 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
3879 " tx pkt (%lx)\n",
3880 bnx2x_tx_avail(bp->fp),
3881 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
3882 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
3883 " rx pkt (%lx)\n",
3884 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
3885 bp->fp->rx_comp_cons),
3886 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
3887 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
3888 "brb truncate %u\n",
3889 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
3890 qstats->driver_xoff,
3891 estats->brb_drop_lo, estats->brb_truncate_lo);
3892 printk(KERN_DEBUG "tstats: checksum_discard %u "
3893 "packets_too_big_discard %lu no_buff_discard %lu "
3894 "mac_discard %u mac_filter_discard %u "
3895 "xxovrflow_discard %u brb_truncate_discard %u "
3896 "ttl0_discard %u\n",
3897 old_tclient->checksum_discard,
3898 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
3899 bnx2x_hilo(&qstats->no_buff_discard_hi),
3900 estats->mac_discard, estats->mac_filter_discard,
3901 estats->xxoverflow_discard, estats->brb_truncate_discard,
3902 old_tclient->ttl0_discard);
3903
3904 for_each_queue(bp, i) {
3905 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
3906 bnx2x_fp(bp, i, tx_pkt),
3907 bnx2x_fp(bp, i, rx_pkt),
3908 bnx2x_fp(bp, i, rx_calls));
3909 }
3910 }
3911
3912 bnx2x_hw_stats_post(bp);
3913 bnx2x_storm_stats_post(bp);
3914}
3915 
3916 static void bnx2x_port_stats_stop(struct bnx2x *bp)
3917 {
3918 struct dmae_command *dmae;
3919 u32 opcode;
3920 int loader_idx = PMF_DMAE_C(bp);
3921 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3922 
3923 bp->executer_idx = 0;
3924 
3925 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3926 DMAE_CMD_C_ENABLE |
3927 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3928 #ifdef __BIG_ENDIAN
3929 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3930 #else
3931 DMAE_CMD_ENDIANITY_DW_SWAP |
3932 #endif
3933 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3934 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3935
3936 if (bp->port.port_stx) {
3937
3938 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3939 if (bp->func_stx)
3940 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3941 else
3942 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3943 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3944 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3945 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3946 dmae->dst_addr_hi = 0;
3947 dmae->len = sizeof(struct host_port_stats) >> 2;
3948 if (bp->func_stx) {
3949 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3950 dmae->comp_addr_hi = 0;
3951 dmae->comp_val = 1;
3952 } else {
3953 dmae->comp_addr_lo =
3954 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3955 dmae->comp_addr_hi =
3956 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3957 dmae->comp_val = DMAE_COMP_VAL;
3958 
3959 *stats_comp = 0;
3960 }
3961 }
3962
3963 if (bp->func_stx) {
3964
3965 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3966 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3967 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3968 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3969 dmae->dst_addr_lo = bp->func_stx >> 2;
3970 dmae->dst_addr_hi = 0;
3971 dmae->len = sizeof(struct host_func_stats) >> 2;
3972 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3973 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3974 dmae->comp_val = DMAE_COMP_VAL;
3975
3976 *stats_comp = 0;
3977 }
3978}
3979
3980 static void bnx2x_stats_stop(struct bnx2x *bp)
3981 {
3982 int update = 0;
3983
3984 bnx2x_stats_comp(bp);
3985
3986 if (bp->port.pmf)
3987 update = (bnx2x_hw_stats_update(bp) == 0);
3988
3989 update |= (bnx2x_storm_stats_update(bp) == 0);
3990
3991 if (update) {
3992 bnx2x_net_stats_update(bp);
3993 
3994 if (bp->port.pmf)
3995 bnx2x_port_stats_stop(bp);
3996
3997 bnx2x_hw_stats_post(bp);
3998 bnx2x_stats_comp(bp);
3999 }
4000}
4001
4002 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4003 {
4004}
4005
4006 static const struct {
4007 void (*action)(struct bnx2x *bp);
4008 enum bnx2x_stats_state next_state;
4009 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4010 /* state event */
4011 {
4012 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4013 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4014 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4015 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4016 },
4017 {
4018 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4019 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4020 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4021 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4022 }
4023 };
4024
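/* Statistics state machine: bnx2x_stats_handle() below indexes the table
 * above by the current state (DISABLED/ENABLED) and the incoming event
 * (PMF/LINK_UP/UPDATE/STOP), runs the action and moves to next_state.
 * For example, a LINK_UP event while DISABLED runs bnx2x_stats_start()
 * and leaves the machine in STATS_STATE_ENABLED.
 */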
4025 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4026 {
4027 enum bnx2x_stats_state state = bp->stats_state;
4028
4029 bnx2x_stats_stm[state][event].action(bp);
4030 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4031
4032 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4033 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4034 state, event, bp->stats_state);
4035}
4036
4037 static void bnx2x_timer(unsigned long data)
4038 {
4039 struct bnx2x *bp = (struct bnx2x *) data;
4040
4041 if (!netif_running(bp->dev))
4042 return;
4043
4044 if (atomic_read(&bp->intr_sem) != 0)
4045 goto timer_restart;
4046
4047 if (poll) {
4048 struct bnx2x_fastpath *fp = &bp->fp[0];
4049 int rc;
4050
4051 bnx2x_tx_int(fp, 1000);
4052 rc = bnx2x_rx_int(fp, 1000);
4053 }
4054
4055 if (!BP_NOMCP(bp)) {
4056 int func = BP_FUNC(bp);
4057 u32 drv_pulse;
4058 u32 mcp_pulse;
4059
4060 ++bp->fw_drv_pulse_wr_seq;
4061 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4062 /* TBD - add SYSTEM_TIME */
4063 drv_pulse = bp->fw_drv_pulse_wr_seq;
4064 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4065 
4066 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4067 MCP_PULSE_SEQ_MASK);
4068 /* The delta between driver pulse and mcp response
4069 * should be 1 (before mcp response) or 0 (after mcp response)
4070 */
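		/* Example: after the driver writes drv_pulse 0x0012, a live
		 * MCP reads back as mcp_pulse 0x0012 (already acked) or
		 * 0x0011 (ack pending); anything else trips the error below.
		 */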
4071 if ((drv_pulse != mcp_pulse) &&
4072 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4073 /* someone lost a heartbeat... */
4074 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4075 drv_pulse, mcp_pulse);
4076 }
4077 }
4078
4079 if ((bp->state == BNX2X_STATE_OPEN) ||
4080 (bp->state == BNX2X_STATE_DISABLED))
4081 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4082 
4083 timer_restart:
4084 mod_timer(&bp->timer, jiffies + bp->current_interval);
4085}
4086
4087 /* end of Statistics */
4088 
4089 /* nic init */
4090 
4091 /*
4092 * nic init service functions
4093 */
4094
4095 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4096 {
4097 int port = BP_PORT(bp);
4098
4099 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4100 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4101 sizeof(struct ustorm_status_block)/4);
4102 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4103 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4104 sizeof(struct cstorm_status_block)/4);
4105}
4106
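/* Publish a per-fastpath host status block: write its DMA address into the
 * USTORM/CSTORM internal memories and start with every host-coalescing
 * index disabled; bnx2x_update_coalesce() enables them per index later.
 */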
4107 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4108 dma_addr_t mapping, int sb_id)
4109 {
4110 int port = BP_PORT(bp);
4111 int func = BP_FUNC(bp);
4112 int index;
4113 u64 section;
4114
4115 /* USTORM */
4116 section = ((u64)mapping) + offsetof(struct host_status_block,
4117 u_status_block);
4118 sb->u_status_block.status_block_id = sb_id;
4119
4120 REG_WR(bp, BAR_USTRORM_INTMEM +
4121 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4122 REG_WR(bp, BAR_USTRORM_INTMEM +
4123 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4124 U64_HI(section));
4125 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4126 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4127
4128 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4129 REG_WR16(bp, BAR_USTRORM_INTMEM +
4130 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4131
4132 /* CSTORM */
4133 section = ((u64)mapping) + offsetof(struct host_status_block,
4134 c_status_block);
4135 sb->c_status_block.status_block_id = sb_id;
4136
4137 REG_WR(bp, BAR_CSTRORM_INTMEM +
4138 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4139 REG_WR(bp, BAR_CSTRORM_INTMEM +
4140 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4141 U64_HI(section));
4142 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4143 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4144
4145 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4146 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4147 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4148
4149 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4150}
4151
4152 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4153 {
4154 int func = BP_FUNC(bp);
4155 
4156 bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
4157 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4158 sizeof(struct ustorm_def_status_block)/4);
4159 bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
4160 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4161 sizeof(struct cstorm_def_status_block)/4);
4162 bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
4163 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4164 sizeof(struct xstorm_def_status_block)/4);
4165 bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
4166 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4167 sizeof(struct tstorm_def_status_block)/4);
4168}
4169
4170 static void bnx2x_init_def_sb(struct bnx2x *bp,
4171 struct host_def_status_block *def_sb,
4172 dma_addr_t mapping, int sb_id)
4173 {
4174 int port = BP_PORT(bp);
4175 int func = BP_FUNC(bp);
a2fbb9ea
ET
4176 int index, val, reg_offset;
4177 u64 section;
4178
4179 /* ATTN */
4180 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4181 atten_status_block);
4182 def_sb->atten_status_block.status_block_id = sb_id;
4183 
4184 bp->attn_state = 0;
4185
4186 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4187 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4188
4189 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4190 bp->attn_group[index].sig[0] = REG_RD(bp,
4191 reg_offset + 0x10*index);
4192 bp->attn_group[index].sig[1] = REG_RD(bp,
4193 reg_offset + 0x4 + 0x10*index);
4194 bp->attn_group[index].sig[2] = REG_RD(bp,
4195 reg_offset + 0x8 + 0x10*index);
4196 bp->attn_group[index].sig[3] = REG_RD(bp,
4197 reg_offset + 0xc + 0x10*index);
4198 }
4199
4200 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4201 HC_REG_ATTN_MSG0_ADDR_L);
4202
4203 REG_WR(bp, reg_offset, U64_LO(section));
4204 REG_WR(bp, reg_offset + 4, U64_HI(section));
4205
4206 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4207
4208 val = REG_RD(bp, reg_offset);
4209 val |= sb_id;
4210 REG_WR(bp, reg_offset, val);
4211
4212 /* USTORM */
4213 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4214 u_def_status_block);
4215 def_sb->u_def_status_block.status_block_id = sb_id;
4216
4217 REG_WR(bp, BAR_USTRORM_INTMEM +
4218 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4219 REG_WR(bp, BAR_USTRORM_INTMEM +
4220 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4221 U64_HI(section));
4222 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4223 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4224
4225 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4226 REG_WR16(bp, BAR_USTRORM_INTMEM +
4227 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4228
4229 /* CSTORM */
4230 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4231 c_def_status_block);
4232 def_sb->c_def_status_block.status_block_id = sb_id;
4233
4234 REG_WR(bp, BAR_CSTRORM_INTMEM +
4235 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4236 REG_WR(bp, BAR_CSTRORM_INTMEM +
4237 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4238 U64_HI(section));
4239 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4240 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4241
4242 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4243 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4244 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4245
4246 /* TSTORM */
4247 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4248 t_def_status_block);
4249 def_sb->t_def_status_block.status_block_id = sb_id;
4250
4251 REG_WR(bp, BAR_TSTRORM_INTMEM +
4252 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4253 REG_WR(bp, BAR_TSTRORM_INTMEM +
4254 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4255 U64_HI(section));
4256 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4257 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4258
4259 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4260 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4261 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4262
4263 /* XSTORM */
4264 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4265 x_def_status_block);
4266 def_sb->x_def_status_block.status_block_id = sb_id;
4267
4268 REG_WR(bp, BAR_XSTRORM_INTMEM +
4269 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4270 REG_WR(bp, BAR_XSTRORM_INTMEM +
4271 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4272 U64_HI(section));
4273 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4274 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4275
4276 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4277 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4278 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4279 
4280 bp->stats_pending = 0;
4281 bp->set_mac_pending = 0;
4282 
4283 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4284}
4285
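/* Program status-block coalescing: bp->rx_ticks/bp->tx_ticks are in
 * microseconds while the HC timeout registers count 12-usec units (hence
 * the /12); a tick value of 0 leaves the corresponding index disabled.
 */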
4286 static void bnx2x_update_coalesce(struct bnx2x *bp)
4287 {
4288 int port = BP_PORT(bp);
4289 int i;
4290
4291 for_each_queue(bp, i) {
4292 int sb_id = bp->fp[i].sb_id;
4293
4294 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4295 REG_WR8(bp, BAR_USTRORM_INTMEM +
4296 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4297 U_SB_ETH_RX_CQ_INDEX),
4298 bp->rx_ticks/12);
4299 REG_WR16(bp, BAR_USTRORM_INTMEM +
4300 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4301 U_SB_ETH_RX_CQ_INDEX),
4302 bp->rx_ticks ? 0 : 1);
4303
4304 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4305 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4306 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4307 C_SB_ETH_TX_CQ_INDEX),
4308 bp->tx_ticks/12);
4309 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4310 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4311 C_SB_ETH_TX_CQ_INDEX),
4312 bp->tx_ticks ? 0 : 1);
4313 }
4314}
4315
4316 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4317 struct bnx2x_fastpath *fp, int last)
4318{
4319 int i;
4320
4321 for (i = 0; i < last; i++) {
4322 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4323 struct sk_buff *skb = rx_buf->skb;
4324
4325 if (skb == NULL) {
4326 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4327 continue;
4328 }
4329
4330 if (fp->tpa_state[i] == BNX2X_TPA_START)
4331 pci_unmap_single(bp->pdev,
4332 pci_unmap_addr(rx_buf, mapping),
4333 bp->rx_buf_size,
4334 PCI_DMA_FROMDEVICE);
4335
4336 dev_kfree_skb(skb);
4337 rx_buf->skb = NULL;
4338 }
4339}
4340
4341 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4342 {
4343 int func = BP_FUNC(bp);
4344 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4345 ETH_MAX_AGGREGATION_QUEUES_E1H;
4346 u16 ring_prod, cqe_ring_prod;
4347 int i, j;
4348 
4349 bp->rx_buf_size += bp->rx_offset + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4350 DP(NETIF_MSG_IFUP,
4351 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4352 
4353 if (bp->flags & TPA_ENABLE_FLAG) {
4354 
4355 for_each_rx_queue(bp, j) {
4356 struct bnx2x_fastpath *fp = &bp->fp[j];
4357 
4358 for (i = 0; i < max_agg_queues; i++) {
4359 fp->tpa_pool[i].skb =
4360 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4361 if (!fp->tpa_pool[i].skb) {
4362 BNX2X_ERR("Failed to allocate TPA "
4363 "skb pool for queue[%d] - "
4364 "disabling TPA on this "
4365 "queue!\n", j);
4366 bnx2x_free_tpa_pool(bp, fp, i);
4367 fp->disable_tpa = 1;
4368 break;
4369 }
4370 pci_unmap_addr_set((struct sw_rx_bd *)
4371 &bp->fp->tpa_pool[i],
4372 mapping, 0);
4373 fp->tpa_state[i] = BNX2X_TPA_STOP;
4374 }
4375 }
4376 }
4377
4378 for_each_rx_queue(bp, j) {
4379 struct bnx2x_fastpath *fp = &bp->fp[j];
4380
4381 fp->rx_bd_cons = 0;
4382 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4383 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4384
4385 /* "next page" elements initialization */
4386 /* SGE ring */
4387 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4388 struct eth_rx_sge *sge;
4389
4390 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4391 sge->addr_hi =
4392 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4393 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4394 sge->addr_lo =
4395 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4396 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4397 }
4398
4399 bnx2x_init_sge_ring_bit_mask(fp);
4400 
4401 /* RX BD ring */
4402 for (i = 1; i <= NUM_RX_RINGS; i++) {
4403 struct eth_rx_bd *rx_bd;
4404
4405 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4406 rx_bd->addr_hi =
4407 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4408 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4409 rx_bd->addr_lo =
4410 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4411 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4412 }
4413
4414 /* CQ ring */
4415 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4416 struct eth_rx_cqe_next_page *nextpg;
4417
4418 nextpg = (struct eth_rx_cqe_next_page *)
4419 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4420 nextpg->addr_hi =
4421 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4422 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4423 nextpg->addr_lo =
4424 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4425 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4426 }
4427
4428 /* Allocate SGEs and initialize the ring elements */
4429 for (i = 0, ring_prod = 0;
4430 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4431 
4432 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4433 BNX2X_ERR("was only able to allocate "
4434 "%d rx sges\n", i);
4435 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4436 /* Cleanup already allocated elements */
4437 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4438 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4439 fp->disable_tpa = 1;
4440 ring_prod = 0;
4441 break;
4442 }
4443 ring_prod = NEXT_SGE_IDX(ring_prod);
4444 }
4445 fp->rx_sge_prod = ring_prod;
4446
4447 /* Allocate BDs and initialize BD ring */
4448 fp->rx_comp_cons = 0;
4449 cqe_ring_prod = ring_prod = 0;
4450 for (i = 0; i < bp->rx_ring_size; i++) {
4451 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4452 BNX2X_ERR("was only able to allocate "
4453 "%d rx skbs on queue[%d]\n", i, j);
4454 fp->eth_q_stats.rx_skb_alloc_failed++;
4455 break;
4456 }
4457 ring_prod = NEXT_RX_IDX(ring_prod);
4458 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4459 WARN_ON(ring_prod <= i);
4460 }
4461
4462 fp->rx_bd_prod = ring_prod;
4463 /* must not have more available CQEs than BDs */
4464 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4465 cqe_ring_prod);
4466 fp->rx_pkt = fp->rx_calls = 0;
4467
4468 /* Warning!
4469 * This will generate an interrupt (to the TSTORM);
4470 * it must only be done after the chip is initialized.
4471 */
4472 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4473 fp->rx_sge_prod);
4474 if (j != 0)
4475 continue;
4476
4477 REG_WR(bp, BAR_USTRORM_INTMEM +
4478 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4479 U64_LO(fp->rx_comp_mapping));
4480 REG_WR(bp, BAR_USTRORM_INTMEM +
4481 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4482 U64_HI(fp->rx_comp_mapping));
4483 }
4484}
4485
4486 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4487 {
4488 int i, j;
4489
4490 for_each_tx_queue(bp, j) {
4491 struct bnx2x_fastpath *fp = &bp->fp[j];
4492
4493 for (i = 1; i <= NUM_TX_RINGS; i++) {
4494 struct eth_tx_bd *tx_bd =
4495 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4496
4497 tx_bd->addr_hi =
4498 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4499 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4500 tx_bd->addr_lo =
4501 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4502 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4503 }
4504
4505 fp->tx_pkt_prod = 0;
4506 fp->tx_pkt_cons = 0;
4507 fp->tx_bd_prod = 0;
4508 fp->tx_bd_cons = 0;
4509 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4510 fp->tx_pkt = 0;
4511 }
4512}
4513
4514 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4515 {
4516 int func = BP_FUNC(bp);
4517
4518 spin_lock_init(&bp->spq_lock);
4519
4520 bp->spq_left = MAX_SPQ_PENDING;
4521 bp->spq_prod_idx = 0;
4522 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4523 bp->spq_prod_bd = bp->spq;
4524 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4525
4526 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4527 U64_LO(bp->spq_mapping));
4528 REG_WR(bp,
4529 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4530 U64_HI(bp->spq_mapping));
4531
4532 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4533 bp->spq_prod_idx);
4534}
4535
4536 static void bnx2x_init_context(struct bnx2x *bp)
4537 {
4538 int i;
4539
4540 for_each_queue(bp, i) {
4541 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4542 struct bnx2x_fastpath *fp = &bp->fp[i];
4543 u8 cl_id = fp->cl_id;
4544 u8 sb_id = FP_SB_ID(fp);
4545 
4546 context->ustorm_st_context.common.sb_index_numbers =
4547 BNX2X_RX_SB_INDEX_NUM;
4548 context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
4549 context->ustorm_st_context.common.status_block_id = sb_id;
4550 context->ustorm_st_context.common.flags =
4551 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4552 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4553 context->ustorm_st_context.common.statistics_counter_id =
4554 cl_id;
4555 context->ustorm_st_context.common.mc_alignment_log_size =
4556 BNX2X_RX_ALIGN_SHIFT;
4557 context->ustorm_st_context.common.bd_buff_size =
4558 bp->rx_buf_size;
4559 context->ustorm_st_context.common.bd_page_base_hi =
4560 U64_HI(fp->rx_desc_mapping);
4561 context->ustorm_st_context.common.bd_page_base_lo =
4562 U64_LO(fp->rx_desc_mapping);
4563 if (!fp->disable_tpa) {
4564 context->ustorm_st_context.common.flags |=
4565 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4566 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4567 context->ustorm_st_context.common.sge_buff_size =
4568 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4569 (u32)0xffff);
4570 context->ustorm_st_context.common.sge_page_base_hi =
4571 U64_HI(fp->rx_sge_mapping);
4572 context->ustorm_st_context.common.sge_page_base_lo =
4573 U64_LO(fp->rx_sge_mapping);
4574 }
4575
4576 context->ustorm_ag_context.cdu_usage =
4577 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4578 CDU_REGION_NUMBER_UCM_AG,
4579 ETH_CONNECTION_TYPE);
4580
4581 context->xstorm_st_context.tx_bd_page_base_hi =
4582 U64_HI(fp->tx_desc_mapping);
4583 context->xstorm_st_context.tx_bd_page_base_lo =
4584 U64_LO(fp->tx_desc_mapping);
4585 context->xstorm_st_context.db_data_addr_hi =
4586 U64_HI(fp->tx_prods_mapping);
4587 context->xstorm_st_context.db_data_addr_lo =
4588 U64_LO(fp->tx_prods_mapping);
4589 context->xstorm_st_context.statistics_data = (fp->cl_id |
4590 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4591 context->cstorm_st_context.sb_index_number =
4592 C_SB_ETH_TX_CQ_INDEX;
4593 context->cstorm_st_context.status_block_id = sb_id;
4594
4595 context->xstorm_ag_context.cdu_reserved =
4596 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4597 CDU_REGION_NUMBER_XCM_AG,
4598 ETH_CONNECTION_TYPE);
4599 }
4600}
4601
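/* Populate the RSS indirection table: entry i is mapped to client
 * BP_CL_ID(bp) + (i % num_rx_queues), i.e. the RX queues are striped
 * round-robin across all TSTORM_INDIRECTION_TABLE_SIZE slots.
 */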
4602 static void bnx2x_init_ind_table(struct bnx2x *bp)
4603 {
4604 int func = BP_FUNC(bp);
4605 int i;
4606
4607 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4608 return;
4609
4610 DP(NETIF_MSG_IFUP,
4611 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4612 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4613 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4614 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4615 BP_CL_ID(bp) + (i % bp->num_rx_queues));
4616}
4617
4618 static void bnx2x_set_client_config(struct bnx2x *bp)
4619 {
4620 struct tstorm_eth_client_config tstorm_client = {0};
4621 int port = BP_PORT(bp);
4622 int i;
4623 
4624 tstorm_client.mtu = bp->dev->mtu;
4625 tstorm_client.config_flags =
4626 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4627 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4628 #ifdef BCM_VLAN
4629 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4630 tstorm_client.config_flags |=
4631 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4632 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4633 }
4634 #endif
4635 
4636 if (bp->flags & TPA_ENABLE_FLAG) {
4637 tstorm_client.max_sges_for_packet =
4638 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4639 tstorm_client.max_sges_for_packet =
4640 ((tstorm_client.max_sges_for_packet +
4641 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4642 PAGES_PER_SGE_SHIFT;
4643
4644 tstorm_client.config_flags |=
4645 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4646 }
4647
4648 for_each_queue(bp, i) {
4649 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4650
4651 REG_WR(bp, BAR_TSTRORM_INTMEM +
4652 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4653 ((u32 *)&tstorm_client)[0]);
4654 REG_WR(bp, BAR_TSTRORM_INTMEM +
4655 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4656 ((u32 *)&tstorm_client)[1]);
4657 }
4658
4659 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4660 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4661}
4662
4663 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4664 {
4665 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4666 int mode = bp->rx_mode;
4667 int mask = (1 << BP_L_ID(bp));
4668 int func = BP_FUNC(bp);
4669 int i;
4670
4671 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4672
4673 switch (mode) {
4674 case BNX2X_RX_MODE_NONE: /* no Rx */
4675 tstorm_mac_filter.ucast_drop_all = mask;
4676 tstorm_mac_filter.mcast_drop_all = mask;
4677 tstorm_mac_filter.bcast_drop_all = mask;
4678 break;
4679 case BNX2X_RX_MODE_NORMAL:
4680 tstorm_mac_filter.bcast_accept_all = mask;
4681 break;
4682 case BNX2X_RX_MODE_ALLMULTI:
4683 tstorm_mac_filter.mcast_accept_all = mask;
4684 tstorm_mac_filter.bcast_accept_all = mask;
4685 break;
4686 case BNX2X_RX_MODE_PROMISC:
4687 tstorm_mac_filter.ucast_accept_all = mask;
4688 tstorm_mac_filter.mcast_accept_all = mask;
4689 tstorm_mac_filter.bcast_accept_all = mask;
4690 break;
4691 default:
4692 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4693 break;
4694 }
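	/* Each field in tstorm_eth_mac_filter_config is a bit-mask over
	 * client IDs; this function only sets its own leading-client bit
	 * (mask), and the struct is then copied one u32 at a time into
	 * TSTORM internal memory below.
	 */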
4695
4696 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4697 REG_WR(bp, BAR_TSTRORM_INTMEM +
4698 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4699 ((u32 *)&tstorm_mac_filter)[i]);
4700
4701 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4702 ((u32 *)&tstorm_mac_filter)[i]); */
4703 }
4704 
4705 if (mode != BNX2X_RX_MODE_NONE)
4706 bnx2x_set_client_config(bp);
4707}
4708
4709 static void bnx2x_init_internal_common(struct bnx2x *bp)
4710 {
4711 int i;
4712
4713 if (bp->flags & TPA_ENABLE_FLAG) {
4714 struct tstorm_eth_tpa_exist tpa = {0};
4715
4716 tpa.tpa_exist = 1;
4717
4718 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4719 ((u32 *)&tpa)[0]);
4720 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4721 ((u32 *)&tpa)[1]);
4722 }
4723
4724 /* Zero this manually as its initialization is
4725 currently missing in the initTool */
4726 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4727 REG_WR(bp, BAR_USTRORM_INTMEM +
4728 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4729}
4730
4731 static void bnx2x_init_internal_port(struct bnx2x *bp)
4732 {
4733 int port = BP_PORT(bp);
4734
4735 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4736 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4737 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4738 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4739}
4740
4741 /* Calculates the sum of vn_min_rates.
4742 It's needed for further normalizing of the min_rates.
4743 Returns:
4744 sum of vn_min_rates
4745 or
4746 0 - if all the min_rates are 0.
4747 In the latter case the fairness algorithm should be deactivated.
4748 If not all min_rates are zero then those that are zero will be set to 1.
4749 */
4750 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4751 {
4752 int all_zero = 1;
4753 int port = BP_PORT(bp);
4754 int vn;
4755
4756 bp->vn_weight_sum = 0;
4757 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4758 int func = 2*vn + port;
4759 u32 vn_cfg =
4760 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4761 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4762 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4763
4764 /* Skip hidden vns */
4765 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4766 continue;
4767
4768 /* If min rate is zero - set it to 1 */
4769 if (!vn_min_rate)
4770 vn_min_rate = DEF_MIN_RATE;
4771 else
4772 all_zero = 0;
4773
4774 bp->vn_weight_sum += vn_min_rate;
4775 }
4776
4777 /* ... only if all min rates are zeros - disable fairness */
4778 if (all_zero)
4779 bp->vn_weight_sum = 0;
4780}
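/* Worked example: two active VNs configured with min BW fields of 25 and
 * 75 yield vn_min_rate values of 2500 and 7500 (the raw shmem value
 * scaled by 100), so vn_weight_sum = 10000 and fairness stays enabled;
 * if every VN reports 0, vn_weight_sum remains 0 and fairness is
 * deactivated.
 */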
4781
4782 static void bnx2x_init_internal_func(struct bnx2x *bp)
4783 {
4784 struct tstorm_eth_function_common_config tstorm_config = {0};
4785 struct stats_indication_flags stats_flags = {0};
4786 int port = BP_PORT(bp);
4787 int func = BP_FUNC(bp);
4788 int i, j;
4789 u32 offset;
4790 u16 max_agg_size;
4791
4792 if (is_multi(bp)) {
4793 tstorm_config.config_flags = MULTI_FLAGS(bp);
4794 tstorm_config.rss_result_mask = MULTI_MASK;
4795 }
4796 if (IS_E1HMF(bp))
4797 tstorm_config.config_flags |=
4798 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4799 
4800 tstorm_config.leading_client_id = BP_L_ID(bp);
4801
4802 REG_WR(bp, BAR_TSTRORM_INTMEM +
4803 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4804 (*(u32 *)&tstorm_config));
4805
4806 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4807 bnx2x_set_storm_rx_mode(bp);
4808
4809 for_each_queue(bp, i) {
4810 u8 cl_id = bp->fp[i].cl_id;
4811
4812 /* reset xstorm per client statistics */
4813 offset = BAR_XSTRORM_INTMEM +
4814 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4815 for (j = 0;
4816 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4817 REG_WR(bp, offset + j*4, 0);
4818
4819 /* reset tstorm per client statistics */
4820 offset = BAR_TSTRORM_INTMEM +
4821 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4822 for (j = 0;
4823 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4824 REG_WR(bp, offset + j*4, 0);
4825
4826 /* reset ustorm per client statistics */
4827 offset = BAR_USTRORM_INTMEM +
4828 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4829 for (j = 0;
4830 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4831 REG_WR(bp, offset + j*4, 0);
4832 }
4833
4834 /* Init statistics related context */
4835 stats_flags.collect_eth = 1;
4836 
4837 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
4838 ((u32 *)&stats_flags)[0]);
4839 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
4840 ((u32 *)&stats_flags)[1]);
4841
4842 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
4843 ((u32 *)&stats_flags)[0]);
4844 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
4845 ((u32 *)&stats_flags)[1]);
4846
4847 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4848 ((u32 *)&stats_flags)[0]);
4849 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4850 ((u32 *)&stats_flags)[1]);
4851
4852 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
4853 ((u32 *)&stats_flags)[0]);
4854 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4855 ((u32 *)&stats_flags)[1]);
4856
4857 REG_WR(bp, BAR_XSTRORM_INTMEM +
4858 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4859 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4860 REG_WR(bp, BAR_XSTRORM_INTMEM +
4861 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4862 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4863
4864 REG_WR(bp, BAR_TSTRORM_INTMEM +
4865 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4866 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4867 REG_WR(bp, BAR_TSTRORM_INTMEM +
4868 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4869 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4870 
4871 REG_WR(bp, BAR_USTRORM_INTMEM +
4872 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4873 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4874 REG_WR(bp, BAR_USTRORM_INTMEM +
4875 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4876 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4877
4878 if (CHIP_IS_E1H(bp)) {
4879 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4880 IS_E1HMF(bp));
4881 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4882 IS_E1HMF(bp));
4883 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4884 IS_E1HMF(bp));
4885 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4886 IS_E1HMF(bp));
4887
4888 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4889 bp->e1hov);
4890 }
4891
4892 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4893 max_agg_size =
4894 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4895 SGE_PAGE_SIZE * PAGES_PER_SGE),
4896 (u32)0xffff);
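	/* With the usual 4K SGE pages and PAGES_PER_SGE == 2 (assumed here),
	 * this evaluates to min(8 * 4096 * 2, 0xffff) = 0xffff, i.e. the
	 * per-packet aggregation size is effectively capped by the 16-bit
	 * register field.
	 */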
4897 for_each_rx_queue(bp, i) {
4898 struct bnx2x_fastpath *fp = &bp->fp[i];
4899
4900 REG_WR(bp, BAR_USTRORM_INTMEM +
4901 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4902 U64_LO(fp->rx_comp_mapping));
4903 REG_WR(bp, BAR_USTRORM_INTMEM +
4904 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4905 U64_HI(fp->rx_comp_mapping));
4906
4907 REG_WR16(bp, BAR_USTRORM_INTMEM +
4908 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4909 max_agg_size);
4910 }
4911
4912 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
4913
4914 /* Init rate shaping and fairness contexts */
4915 if (IS_E1HMF(bp)) {
4916 int vn;
4917
4918 /* During init there is no active link;
4919 until link is up, assume a link rate of 10Gbps */
4920 bp->link_vars.line_speed = SPEED_10000;
4921 bnx2x_init_port_minmax(bp);
4922
4923 bnx2x_calc_vn_weight_sum(bp);
4924
4925 for (vn = VN_0; vn < E1HVN_MAX; vn++)
4926 bnx2x_init_vn_minmax(bp, 2*vn + port);
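	/* Editor's note: 2*vn + port is the absolute function number;
	 * VN_0..VN_3 map to functions 0,2,4,6 on port 0 and to
	 * functions 1,3,5,7 on port 1 */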
4927
4928 /* Enable rate shaping and fairness */
4929 bp->cmng.flags.cmng_enables =
4930 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
4931 if (bp->vn_weight_sum)
4932 bp->cmng.flags.cmng_enables |=
4933 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
4934 else
4935 DP(NETIF_MSG_IFUP, "All MIN values are zeroes,"
4936 " fairness will be disabled\n");
4937 } else {
4938 /* rate shaping and fairness are disabled */
4939 DP(NETIF_MSG_IFUP,
4940 "single function mode minmax will be disabled\n");
4941 }
4942
4943
4944 /* Store it to internal memory */
4945 if (bp->port.pmf)
4946 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
4947 REG_WR(bp, BAR_XSTRORM_INTMEM +
4948 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
4949 ((u32 *)(&bp->cmng))[i]);
a2fbb9ea 4950}
4951
471de716 4952static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
4953{
4954 switch (load_code) {
4955 case FW_MSG_CODE_DRV_LOAD_COMMON:
4956 bnx2x_init_internal_common(bp);
4957 /* no break */
4958
4959 case FW_MSG_CODE_DRV_LOAD_PORT:
4960 bnx2x_init_internal_port(bp);
4961 /* no break */
4962
4963 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4964 bnx2x_init_internal_func(bp);
4965 break;
4966
4967 default:
4968 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4969 break;
4970 }
4971}
4972
4973static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
a2fbb9ea 4974{
4975 int i;
4976
4977 for_each_queue(bp, i) {
4978 struct bnx2x_fastpath *fp = &bp->fp[i];
4979
34f80b04 4980 fp->bp = bp;
a2fbb9ea 4981 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 4982 fp->index = i;
34f80b04 4983 fp->cl_id = BP_L_ID(bp) + i;
4984 fp->sb_id = fp->cl_id;
4985 DP(NETIF_MSG_IFUP,
4986 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
4987 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5c862848 4988 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
4989 FP_SB_ID(fp));
4990 bnx2x_update_fpsb_idx(fp);
a2fbb9ea 4991 }
4992
5c862848 4993 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
4994 DEF_SB_ID);
4995 bnx2x_update_dsb_idx(bp);
a2fbb9ea 4996 bnx2x_update_coalesce(bp);
4997 bnx2x_init_rx_rings(bp);
4998 bnx2x_init_tx_ring(bp);
4999 bnx2x_init_sp_ring(bp);
5000 bnx2x_init_context(bp);
471de716 5001 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5002 bnx2x_init_ind_table(bp);
0ef00459 5003 bnx2x_stats_init(bp);
5004
5005 /* At this point, we are ready for interrupts */
5006 atomic_set(&bp->intr_sem, 0);
5007
5008 /* flush all before enabling interrupts */
5009 mb();
5010 mmiowb();
5011
615f8fd9 5012 bnx2x_int_enable(bp);
a2fbb9ea 5013}
5014
5015/* end of nic init */
5016
5017/*
5018 * gzip service functions
5019 */
5020
5021static int bnx2x_gunzip_init(struct bnx2x *bp)
5022{
5023 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5024 &bp->gunzip_mapping);
5025 if (bp->gunzip_buf == NULL)
5026 goto gunzip_nomem1;
5027
5028 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5029 if (bp->strm == NULL)
5030 goto gunzip_nomem2;
5031
5032 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5033 GFP_KERNEL);
5034 if (bp->strm->workspace == NULL)
5035 goto gunzip_nomem3;
5036
5037 return 0;
5038
5039gunzip_nomem3:
5040 kfree(bp->strm);
5041 bp->strm = NULL;
5042
5043gunzip_nomem2:
5044 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5045 bp->gunzip_mapping);
5046 bp->gunzip_buf = NULL;
5047
5048gunzip_nomem1:
5049 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5050 " decompression\n", bp->dev->name);
a2fbb9ea 5051 return -ENOMEM;
5052}
5053
5054static void bnx2x_gunzip_end(struct bnx2x *bp)
5055{
5056 kfree(bp->strm->workspace);
5057
5058 kfree(bp->strm);
5059 bp->strm = NULL;
5060
5061 if (bp->gunzip_buf) {
5062 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5063 bp->gunzip_mapping);
5064 bp->gunzip_buf = NULL;
5065 }
5066}
5067
5068static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5069{
5070 int n, rc;
5071
5072 /* check gzip header */
5073 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5074 return -EINVAL;
5075
5076 n = 10;
5077
34f80b04 5078#define FNAME 0x8
a2fbb9ea 5079
5080 if (zbuf[3] & FNAME)
5081 while ((zbuf[n++] != 0) && (n < len));
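	/* Editor's note: per RFC 1952 the gzip header is 10 fixed bytes;
	 * when FLG.FNAME (0x8 in zbuf[3]) is set, a NUL-terminated file
	 * name follows before the deflate data, hence the skip loop above */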
5082
5083 bp->strm->next_in = zbuf + n;
5084 bp->strm->avail_in = len - n;
5085 bp->strm->next_out = bp->gunzip_buf;
5086 bp->strm->avail_out = FW_BUF_SIZE;
5087
5088 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5089 if (rc != Z_OK)
5090 return rc;
5091
5092 rc = zlib_inflate(bp->strm, Z_FINISH);
5093 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5094 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5095 bp->dev->name, bp->strm->msg);
5096
5097 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5098 if (bp->gunzip_outlen & 0x3)
5099 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5100 " gunzip_outlen (%d) not aligned\n",
5101 bp->dev->name, bp->gunzip_outlen);
5102 bp->gunzip_outlen >>= 2;
5103
5104 zlib_inflateEnd(bp->strm);
5105
5106 if (rc == Z_STREAM_END)
5107 return 0;
5108
5109 return rc;
5110}
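/* Usage sketch (editor's illustration, not driver code; fw_blob and
 * fw_blob_len are hypothetical names for a compressed firmware image):
 *
 *	if (bnx2x_gunzip_init(bp) == 0) {
 *		rc = bnx2x_gunzip(bp, fw_blob, fw_blob_len);
 *		if (rc == 0)
 *			use(bp->gunzip_buf, bp->gunzip_outlen); // dwords
 *		bnx2x_gunzip_end(bp);
 *	}
 */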
5111
5112/* nic load/unload */
5113
5114/*
34f80b04 5115 * General service functions
a2fbb9ea 5116 */
5117
5118/* send a NIG loopback debug packet */
5119static void bnx2x_lb_pckt(struct bnx2x *bp)
5120{
a2fbb9ea 5121 u32 wb_write[3];
a2fbb9ea 5122
5123 /* Ethernet source and destination addresses */
a2fbb9ea 5124 wb_write[0] = 0x55555555;
5125 wb_write[1] = 0x55555555;
34f80b04 5126 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5127 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea 5128
5129 /* NON-IP protocol */
a2fbb9ea 5130 wb_write[0] = 0x09000000;
5131 wb_write[1] = 0x55555555;
34f80b04 5132 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5133 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
a2fbb9ea 5134}
5135
5136/* some of the internal memories
5137 * are not directly readable from the driver;
5138 * to test them we send debug packets
5139 */
5140static int bnx2x_int_mem_test(struct bnx2x *bp)
5141{
5142 int factor;
5143 int count, i;
5144 u32 val = 0;
5145
ad8d3948 5146 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5147 factor = 120;
ad8d3948 5148 else if (CHIP_REV_IS_EMUL(bp))
5149 factor = 200;
5150 else
a2fbb9ea 5151 factor = 1;
a2fbb9ea 5152
5153 DP(NETIF_MSG_HW, "start part1\n");
5154
5155 /* Disable inputs of parser neighbor blocks */
5156 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5157 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5158 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5159 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea 5160
5161 /* Write 0 to parser credits for CFC search request */
5162 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5163
5164 /* send Ethernet packet */
5165 bnx2x_lb_pckt(bp);
5166
5167 /* TODO: do we need to reset the NIG statistics here? */
5168 /* Wait until NIG register shows 1 packet of size 0x10 */
5169 count = 1000 * factor;
5170 while (count) {
34f80b04 5171
a2fbb9ea 5172 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5173 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea 5174 if (val == 0x10)
5175 break;
5176
5177 msleep(10);
5178 count--;
5179 }
5180 if (val != 0x10) {
5181 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5182 return -1;
5183 }
5184
5185 /* Wait until PRS register shows 1 packet */
5186 count = 1000 * factor;
5187 while (count) {
5188 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
a2fbb9ea 5189 if (val == 1)
5190 break;
5191
5192 msleep(10);
5193 count--;
5194 }
5195 if (val != 0x1) {
5196 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5197 return -2;
5198 }
5199
5200 /* Reset and init BRB, PRS */
34f80b04 5201 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5202 msleep(50);
34f80b04 5203 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
a2fbb9ea 5204 msleep(50);
5205 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5206 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5207
5208 DP(NETIF_MSG_HW, "part2\n");
5209
5210 /* Disable inputs of parser neighbor blocks */
5211 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5212 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5213 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5214 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
a2fbb9ea 5215
5216 /* Write 0 to parser credits for CFC search request */
5217 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5218
5219 /* send 10 Ethernet packets */
5220 for (i = 0; i < 10; i++)
5221 bnx2x_lb_pckt(bp);
5222
5223 /* Wait until NIG register shows 10 + 1
5224 packets of size 11*0x10 = 0xb0 */
5225 count = 1000 * factor;
5226 while (count) {
34f80b04 5227
a2fbb9ea 5228 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5229 val = *bnx2x_sp(bp, wb_data[0]);
a2fbb9ea 5230 if (val == 0xb0)
5231 break;
5232
5233 msleep(10);
5234 count--;
5235 }
5236 if (val != 0xb0) {
5237 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5238 return -3;
5239 }
5240
5241 /* Wait until PRS register shows 2 packets */
5242 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5243 if (val != 2)
5244 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5245
5246 /* Write 1 to parser credits for CFC search request */
5247 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5248
5249 /* Wait until PRS register shows 3 packets */
5250 msleep(10 * factor);
5251 /* Wait until NIG register shows 1 packet of size 0x10 */
5252 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5253 if (val != 3)
5254 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5255
5256 /* clear NIG EOP FIFO */
5257 for (i = 0; i < 11; i++)
5258 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5259 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5260 if (val != 1) {
5261 BNX2X_ERR("clear of NIG failed\n");
5262 return -4;
5263 }
5264
5265 /* Reset and init BRB, PRS, NIG */
5266 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5267 msleep(50);
5268 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5269 msleep(50);
5270 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5271 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5272#ifndef BCM_ISCSI
5273 /* set NIC mode */
5274 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5275#endif
5276
5277 /* Enable inputs of parser neighbor blocks */
5278 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5279 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5280 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5281 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
a2fbb9ea 5282
5283 DP(NETIF_MSG_HW, "done\n");
5284
5285 return 0; /* OK */
5286}
5287
5288static void enable_blocks_attention(struct bnx2x *bp)
5289{
5290 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5291 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5292 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5293 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5294 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5295 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5296 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5297 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5298 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
34f80b04 5299/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5300/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
a2fbb9ea 5301 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5302 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5303 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
34f80b04 5304/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5305/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
a2fbb9ea 5306 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5307 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5308 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5309 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
34f80b04 5310/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5311/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5312 if (CHIP_REV_IS_FPGA(bp))
5313 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5314 else
5315 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea 5316 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5317 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5318 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
34f80b04 5319/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5320/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
a2fbb9ea 5321 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5322 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
34f80b04 5323/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5324 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
a2fbb9ea 5325}
5326
34f80b04 5327
81f75bbf 5328static void bnx2x_reset_common(struct bnx2x *bp)
5329{
5330 /* reset_common */
5331 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5332 0xd3ffff7f);
5333 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5334}
5335
34f80b04 5336static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5337{
a2fbb9ea 5338 u32 val, i;
a2fbb9ea 5339
34f80b04 5340 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5341
81f75bbf 5342 bnx2x_reset_common(bp);
34f80b04 5343 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5344 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5345
34f80b04 5346 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5347 if (CHIP_IS_E1H(bp))
5348 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5349
34f80b04 5350 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5351 msleep(30);
5352 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5353
34f80b04 5354 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5355 if (CHIP_IS_E1(bp)) {
5356 /* enable HW interrupt from PXP on USDM overflow
5357 bit 16 on INT_MASK_0 */
5358 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5359 }
a2fbb9ea 5360
34f80b04 5361 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5362 bnx2x_init_pxp(bp);
a2fbb9ea 5363
5364#ifdef __BIG_ENDIAN
34f80b04 5365 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5366 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5367 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5368 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5369 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
8badd27a 5370 /* make sure this value is 0 */
5371 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
34f80b04 5372
5373/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5374 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5375 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5376 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5377 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
a2fbb9ea 5378#endif
5379
34f80b04 5380 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5381#ifdef BCM_ISCSI
34f80b04 5382 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5383 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5384 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
a2fbb9ea 5385#endif
5386
34f80b04 5387 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5388 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5389
34f80b04 5390 /* let the HW do its magic ... */
5391 msleep(100);
5392 /* finish PXP init */
5393 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5394 if (val != 1) {
5395 BNX2X_ERR("PXP2 CFG failed\n");
5396 return -EBUSY;
5397 }
5398 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5399 if (val != 1) {
5400 BNX2X_ERR("PXP2 RD_INIT failed\n");
5401 return -EBUSY;
5402 }
a2fbb9ea 5403
34f80b04 5404 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5405 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5406
34f80b04 5407 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5408
34f80b04 5409 /* clean the DMAE memory */
5410 bp->dmae_ready = 1;
5411 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5412
34f80b04 5413 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5414 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5415 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5416 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5417
34f80b04 5418 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5419 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5420 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5421 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5422
5423 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5424 /* soft reset pulse */
5425 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5426 REG_WR(bp, QM_REG_SOFT_RESET, 0);
a2fbb9ea 5427
5428#ifdef BCM_ISCSI
34f80b04 5429 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5430#endif
a2fbb9ea 5431
34f80b04 5432 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5433 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5434 if (!CHIP_REV_IS_SLOW(bp)) {
5435 /* enable hw interrupt from doorbell Q */
5436 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5437 }
a2fbb9ea 5438
34f80b04 5439 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5440 if (CHIP_REV_IS_SLOW(bp)) {
5441 /* fix for emulation and FPGA for no pause */
5442 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0, 513);
5443 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_1, 513);
5444 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0, 0);
5445 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_1, 0);
5446 }
a2fbb9ea 5447
34f80b04 5448 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5449 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
3196a88a 5450 /* set NIC mode */
5451 REG_WR(bp, PRS_REG_NIC_MODE, 1);
34f80b04 5452 if (CHIP_IS_E1H(bp))
5453 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5454
34f80b04 5455 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5456 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5457 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5458 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5459
34f80b04 5460 if (CHIP_IS_E1H(bp)) {
5461 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5462 STORM_INTMEM_SIZE_E1H/2);
5463 bnx2x_init_fill(bp,
5464 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5465 0, STORM_INTMEM_SIZE_E1H/2);
5466 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5467 STORM_INTMEM_SIZE_E1H/2);
5468 bnx2x_init_fill(bp,
5469 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5470 0, STORM_INTMEM_SIZE_E1H/2);
5471 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5472 STORM_INTMEM_SIZE_E1H/2);
5473 bnx2x_init_fill(bp,
5474 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5475 0, STORM_INTMEM_SIZE_E1H/2);
5476 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5477 STORM_INTMEM_SIZE_E1H/2);
5478 bnx2x_init_fill(bp,
5479 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5480 0, STORM_INTMEM_SIZE_E1H/2);
5481 } else { /* E1 */
ad8d3948 5482 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5483 STORM_INTMEM_SIZE_E1);
5484 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5485 STORM_INTMEM_SIZE_E1);
5486 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5487 STORM_INTMEM_SIZE_E1);
5488 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5489 STORM_INTMEM_SIZE_E1);
34f80b04 5490 }
a2fbb9ea 5491
34f80b04 5492 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5493 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5494 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5495 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5496
34f80b04 5497 /* sync semi rtc */
5498 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5499 0x80000000);
5500 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5501 0x80000000);
a2fbb9ea 5502
34f80b04 5503 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5504 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5505 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5506
34f80b04 5507 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5508 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5509 REG_WR(bp, i, 0xc0cac01a);
5510 /* TODO: replace with something meaningful */
5511 }
8d9c5f34 5512 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5513 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5514
34f80b04 5515 if (sizeof(union cdu_context) != 1024)
5516 /* we currently assume that a context is 1024 bytes */
5517 printk(KERN_ALERT PFX "please adjust the size of"
5518 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5519
34f80b04 5520 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5521 val = (4 << 24) + (0 << 12) + 1024;
5522 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
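	/* Editor's note: val = (4 << 24) + (0 << 12) + 1024 = 0x04000400;
	 * the low field is the 1024-byte context size assumed above */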
5523 if (CHIP_IS_E1(bp)) {
5524 /* !!! fix pxp client credit until excel update */
5525 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5526 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5527 }
a2fbb9ea 5528
34f80b04 5529 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5530 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
8d9c5f34 5531 /* enable context validation interrupt from CFC */
5532 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5533
5534 /* set the thresholds to prevent CFC/CDU race */
5535 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5536
34f80b04 5537 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5538 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5539
34f80b04 5540 /* PXPCS COMMON comes here */
5541 /* Reset PCIE errors for debug */
5542 REG_WR(bp, 0x2814, 0xffffffff);
5543 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5544
34f80b04 5545 /* EMAC0 COMMON comes here */
5546 /* EMAC1 COMMON comes here */
5547 /* DBU COMMON comes here */
5548 /* DBG COMMON comes here */
5549
5550 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5551 if (CHIP_IS_E1H(bp)) {
5552 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5553 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5554 }
5555
5556 if (CHIP_REV_IS_SLOW(bp))
5557 msleep(200);
5558
5559 /* finish CFC init */
5560 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5561 if (val != 1) {
5562 BNX2X_ERR("CFC LL_INIT failed\n");
5563 return -EBUSY;
5564 }
5565 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5566 if (val != 1) {
5567 BNX2X_ERR("CFC AC_INIT failed\n");
5568 return -EBUSY;
5569 }
5570 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5571 if (val != 1) {
5572 BNX2X_ERR("CFC CAM_INIT failed\n");
5573 return -EBUSY;
5574 }
5575 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5576
34f80b04 5577 /* read the NIG statistics
5578 to see if this is our first up since power-up */
5579 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5580 val = *bnx2x_sp(bp, wb_data[0]);
5581
5582 /* do internal memory self test */
5583 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5584 BNX2X_ERR("internal mem self test failed\n");
5585 return -EBUSY;
5586 }
5587
5588 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5589 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
34f80b04 5590 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5591 /* Fan failure is indicated by SPIO 5 */
5592 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5593 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5594
5595 /* set to active low mode */
5596 val = REG_RD(bp, MISC_REG_SPIO_INT);
5597 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5598 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5599 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5600
34f80b04 5601 /* enable interrupt to signal the IGU */
5602 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5603 val |= (1 << MISC_REGISTERS_SPIO_5);
5604 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5605 break;
f1410647 5606
34f80b04 5607 default:
5608 break;
5609 }
f1410647 5610
34f80b04 5611 /* clear PXP2 attentions */
5612 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5613
34f80b04 5614 enable_blocks_attention(bp);
a2fbb9ea 5615
6bbca910 5616 if (!BP_NOMCP(bp)) {
5617 bnx2x_acquire_phy_lock(bp);
5618 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5619 bnx2x_release_phy_lock(bp);
5620 } else
5621 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5622
34f80b04 5623 return 0;
5624}
a2fbb9ea 5625
34f80b04 5626static int bnx2x_init_port(struct bnx2x *bp)
5627{
5628 int port = BP_PORT(bp);
5629 u32 val;
a2fbb9ea 5630
34f80b04 5631 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5632
5633 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
a2fbb9ea 5634
5635 /* Port PXP comes here */
5636 /* Port PXP2 comes here */
a2fbb9ea 5637#ifdef BCM_ISCSI
5638 /* Port0 1
5639 * Port1 385 */
5640 i++;
5641 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5642 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5643 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5644 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5645
5646 /* Port0 2
5647 * Port1 386 */
5648 i++;
5649 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5650 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5651 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5652 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5653
5654 /* Port0 3
5655 * Port1 387 */
5656 i++;
5657 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5658 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5659 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5660 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5661#endif
34f80b04 5662 /* Port CMs come here */
8d9c5f34 5663 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5664 (port ? XCM_PORT1_END : XCM_PORT0_END));
a2fbb9ea 5665
5666 /* Port QM comes here */
a2fbb9ea 5667#ifdef BCM_ISCSI
5668 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5669 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5670
5671 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5672 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5673#endif
5674 /* Port DQ comes here */
5675 /* Port BRB1 comes here */
ad8d3948 5676 /* Port PRS comes here */
a2fbb9ea 5677 /* Port TSDM comes here */
5678 /* Port CSDM comes here */
5679 /* Port USDM comes here */
5680 /* Port XSDM comes here */
34f80b04 5681 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5682 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5683 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5684 port ? USEM_PORT1_END : USEM_PORT0_END);
5685 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5686 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5687 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5688 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5689 /* Port UPB comes here */
34f80b04 5690 /* Port XPB comes here */
5691
5692 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5693 port ? PBF_PORT1_END : PBF_PORT0_END);
a2fbb9ea 5694
5695 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5696 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
a2fbb9ea 5697
5698 /* update threshold */
34f80b04 5699 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5700 /* update init credit */
34f80b04 5701 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
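	/* Editor's arithmetic: threshold 9040/16 = 565 (16-byte units for a
	 * 9000-byte MTU plus overhead); init credit = 565 + 553 - 22 = 1096 */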
a2fbb9ea 5702
5703 /* probe changes */
34f80b04 5704 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5705 msleep(5);
34f80b04 5706 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
a2fbb9ea 5707
5708#ifdef BCM_ISCSI
5709 /* tell the searcher where the T2 table is */
5710 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5711
5712 wb_write[0] = U64_LO(bp->t2_mapping);
5713 wb_write[1] = U64_HI(bp->t2_mapping);
5714 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5715 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5716 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5717 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5718
5719 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5720 /* Port SRCH comes here */
5721#endif
5722 /* Port CDU comes here */
5723 /* Port CFC comes here */
34f80b04 5724
5725 if (CHIP_IS_E1(bp)) {
5726 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5727 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5728 }
5729 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5730 port ? HC_PORT1_END : HC_PORT0_END);
5731
5732 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5733 MISC_AEU_PORT0_START,
34f80b04 5734 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5735 /* init aeu_mask_attn_func_0/1:
5736 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5737 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5738 * bits 4-7 are used for "per vn group attention" */
5739 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5740 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5741
a2fbb9ea 5742 /* Port PXPCS comes here */
5743 /* Port EMAC0 comes here */
5744 /* Port EMAC1 comes here */
5745 /* Port DBU comes here */
5746 /* Port DBG comes here */
34f80b04 5747 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5748 port ? NIG_PORT1_END : NIG_PORT0_END);
5749
5750 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5751
5752 if (CHIP_IS_E1H(bp)) {
34f80b04 5753 /* 0x2 disable e1hov, 0x1 enable */
5754 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5755 (IS_E1HMF(bp) ? 0x1 : 0x2));
5756
34f80b04 5757 }
5758
a2fbb9ea 5759 /* Port MCP comes here */
5760 /* Port DMAE comes here */
5761
34f80b04 5762 switch (bp->common.board & SHARED_HW_CFG_BOARD_TYPE_MASK) {
7add905f 5763 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1021G:
f1410647 5764 case SHARED_HW_CFG_BOARD_TYPE_BCM957710A1022G:
5765 /* add SPIO 5 to group 0 */
5766 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5767 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5768 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5769 break;
5770
5771 default:
5772 break;
5773 }
5774
c18487ee 5775 bnx2x__link_reset(bp);
a2fbb9ea 5776
34f80b04 5777 return 0;
5778}
5779
5780#define ILT_PER_FUNC (768/2)
5781#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5782/* the phys address is shifted right 12 bits and has a
5783 1=valid bit added at bit 52 (the 53rd bit);
5784 then since this is a wide register(TM)
5785 we split it into two 32-bit writes
5786 */
5787#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5788#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5789#define PXP_ONE_ILT(x) (((x) << 10) | x)
5790#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
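/* Worked example (editor's sketch): for addr = 0x123456000ULL,
 * ONCHIP_ADDR1(addr) = 0x00123456 (addr >> 12, low 32 bits) and
 * ONCHIP_ADDR2(addr) = 0x00100000 (valid bit 20; addr >> 44 == 0);
 * PXP_ONE_ILT(5) = (5 << 10) | 5 = 0x1405.
 */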
5791
5792#define CNIC_ILT_LINES 0
5793
5794static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5795{
5796 int reg;
5797
5798 if (CHIP_IS_E1H(bp))
5799 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5800 else /* E1 */
5801 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5802
5803 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5804}
5805
5806static int bnx2x_init_func(struct bnx2x *bp)
5807{
5808 int port = BP_PORT(bp);
5809 int func = BP_FUNC(bp);
8badd27a 5810 u32 addr, val;
34f80b04 5811 int i;
5812
5813 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5814
8badd27a 5815 /* set MSI reconfigure capability */
5816 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5817 val = REG_RD(bp, addr);
5818 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5819 REG_WR(bp, addr, val);
5820
34f80b04 5821 i = FUNC_ILT_BASE(func);
5822
5823 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5824 if (CHIP_IS_E1H(bp)) {
5825 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5826 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5827 } else /* E1 */
5828 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5829 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5830
5831
5832 if (CHIP_IS_E1H(bp)) {
5833 for (i = 0; i < 9; i++)
5834 bnx2x_init_block(bp,
5835 cm_start[func][i], cm_end[func][i]);
5836
5837 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5838 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5839 }
5840
5841 /* HC init per function */
5842 if (CHIP_IS_E1H(bp)) {
5843 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5844
5845 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5846 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5847 }
5848 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5849
c14423fe 5850 /* Reset PCIE errors for debug */
a2fbb9ea 5851 REG_WR(bp, 0x2114, 0xffffffff);
5852 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5853
34f80b04 5854 return 0;
5855}
5856
5857static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
5858{
5859 int i, rc = 0;
a2fbb9ea 5860
34f80b04 5861 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
5862 BP_FUNC(bp), load_code);
a2fbb9ea 5863
34f80b04 5864 bp->dmae_ready = 0;
5865 mutex_init(&bp->dmae_mutex);
5866 bnx2x_gunzip_init(bp);
a2fbb9ea 5867
34f80b04 5868 switch (load_code) {
5869 case FW_MSG_CODE_DRV_LOAD_COMMON:
5870 rc = bnx2x_init_common(bp);
5871 if (rc)
5872 goto init_hw_err;
5873 /* no break */
5874
5875 case FW_MSG_CODE_DRV_LOAD_PORT:
5876 bp->dmae_ready = 1;
5877 rc = bnx2x_init_port(bp);
5878 if (rc)
5879 goto init_hw_err;
5880 /* no break */
5881
5882 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5883 bp->dmae_ready = 1;
5884 rc = bnx2x_init_func(bp);
5885 if (rc)
5886 goto init_hw_err;
5887 break;
5888
5889 default:
5890 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5891 break;
5892 }
5893
5894 if (!BP_NOMCP(bp)) {
5895 int func = BP_FUNC(bp);
a2fbb9ea 5896
5897 bp->fw_drv_pulse_wr_seq =
34f80b04 5898 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 5899 DRV_PULSE_SEQ_MASK);
34f80b04 5900 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
5901 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
5902 bp->fw_drv_pulse_wr_seq, bp->func_stx);
5903 } else
5904 bp->func_stx = 0;
a2fbb9ea 5905
34f80b04 5906 /* this needs to be done before gunzip end */
5907 bnx2x_zero_def_sb(bp);
5908 for_each_queue(bp, i)
5909 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
5910
5911init_hw_err:
5912 bnx2x_gunzip_end(bp);
5913
5914 return rc;
a2fbb9ea 5915}
5916
c14423fe 5917/* send the MCP a request, block until there is a reply */
a2fbb9ea 5918static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
5919{
34f80b04 5920 int func = BP_FUNC(bp);
f1410647 5921 u32 seq = ++bp->fw_seq;
5922 u32 rc = 0;
19680c48 5923 u32 cnt = 1;
5924 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 5925
34f80b04 5926 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 5927 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 5928
19680c48 5929 do {
5930 /* let the FW do its magic ... */
5931 msleep(delay);
a2fbb9ea 5932
19680c48 5933 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 5934
19680c48 5935 /* Give the FW up to 2 seconds (200*10ms) */
5936 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
5937
5938 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
5939 cnt*delay, rc, seq);
a2fbb9ea 5940
5941 /* is this a reply to our command? */
5942 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
5943 rc &= FW_MSG_CODE_MASK;
f1410647 5944
a2fbb9ea 5945 } else {
5946 /* FW BUG! */
5947 BNX2X_ERR("FW failed to respond!\n");
5948 bnx2x_fw_dump(bp);
5949 rc = 0;
5950 }
f1410647 5951
a2fbb9ea 5952 return rc;
5953}
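/* Usage sketch (editor's illustration): the load path below issues
 *
 *	load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
 *
 * and branches on the FW_MSG_CODE_DRV_LOAD_* reply; a return of 0
 * means the MCP never acknowledged our sequence number.
 */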
5954
5955static void bnx2x_free_mem(struct bnx2x *bp)
5956{
5957
5958#define BNX2X_PCI_FREE(x, y, size) \
5959 do { \
5960 if (x) { \
5961 pci_free_consistent(bp->pdev, size, x, y); \
5962 x = NULL; \
5963 y = 0; \
5964 } \
5965 } while (0)
5966
5967#define BNX2X_FREE(x) \
5968 do { \
5969 if (x) { \
5970 vfree(x); \
5971 x = NULL; \
5972 } \
5973 } while (0)
5974
5975 int i;
5976
5977 /* fastpath */
555f6c78 5978 /* Common */
a2fbb9ea 5979 for_each_queue(bp, i) {
5980
555f6c78 5981 /* status blocks */
a2fbb9ea 5982 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
5983 bnx2x_fp(bp, i, status_blk_mapping),
5984 sizeof(struct host_status_block) +
5985 sizeof(struct eth_tx_db_data));
555f6c78 5986 }
5987 /* Rx */
5988 for_each_rx_queue(bp, i) {
a2fbb9ea 5989
555f6c78 5990 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea 5991 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
5992 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
5993 bnx2x_fp(bp, i, rx_desc_mapping),
5994 sizeof(struct eth_rx_bd) * NUM_RX_BD);
5995
5996 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
5997 bnx2x_fp(bp, i, rx_comp_mapping),
5998 sizeof(struct eth_fast_path_rx_cqe) *
5999 NUM_RCQ_BD);
a2fbb9ea 6000
7a9b2557 6001 /* SGE ring */
32626230 6002 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
7a9b2557 6003 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6004 bnx2x_fp(bp, i, rx_sge_mapping),
6005 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6006 }
555f6c78 6007 /* Tx */
6008 for_each_tx_queue(bp, i) {
6009
6010 /* fastpath tx rings: tx_buf tx_desc */
6011 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6012 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6013 bnx2x_fp(bp, i, tx_desc_mapping),
6014 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6015 }
a2fbb9ea 6016 /* end of fastpath */
6017
6018 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6019 sizeof(struct host_def_status_block));
a2fbb9ea 6020
6021 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6022 sizeof(struct bnx2x_slowpath));
a2fbb9ea 6023
6024#ifdef BCM_ISCSI
6025 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6026 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6027 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6028 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6029#endif
7a9b2557 6030 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
a2fbb9ea 6031
6032#undef BNX2X_PCI_FREE
6033#undef BNX2X_FREE
6034}
6035
6036static int bnx2x_alloc_mem(struct bnx2x *bp)
6037{
6038
6039#define BNX2X_PCI_ALLOC(x, y, size) \
6040 do { \
6041 x = pci_alloc_consistent(bp->pdev, size, y); \
6042 if (x == NULL) \
6043 goto alloc_mem_err; \
6044 memset(x, 0, size); \
6045 } while (0)
6046
6047#define BNX2X_ALLOC(x, size) \
6048 do { \
6049 x = vmalloc(size); \
6050 if (x == NULL) \
6051 goto alloc_mem_err; \
6052 memset(x, 0, size); \
6053 } while (0)
6054
6055 int i;
6056
6057 /* fastpath */
555f6c78 6058 /* Common */
a2fbb9ea 6059 for_each_queue(bp, i) {
6060 bnx2x_fp(bp, i, bp) = bp;
6061
555f6c78 6062 /* status blocks */
a2fbb9ea 6063 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6064 &bnx2x_fp(bp, i, status_blk_mapping),
6065 sizeof(struct host_status_block) +
6066 sizeof(struct eth_tx_db_data));
555f6c78 6067 }
6068 /* Rx */
6069 for_each_rx_queue(bp, i) {
a2fbb9ea 6070
555f6c78 6071 /* fastpath rx rings: rx_buf rx_desc rx_comp */
a2fbb9ea 6072 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6073 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6074 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6075 &bnx2x_fp(bp, i, rx_desc_mapping),
6076 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6077
6078 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6079 &bnx2x_fp(bp, i, rx_comp_mapping),
6080 sizeof(struct eth_fast_path_rx_cqe) *
6081 NUM_RCQ_BD);
6082
7a9b2557 6083 /* SGE ring */
6084 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6085 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6086 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6087 &bnx2x_fp(bp, i, rx_sge_mapping),
6088 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6089 }
555f6c78 6090 /* Tx */
6091 for_each_tx_queue(bp, i) {
6092
6093 bnx2x_fp(bp, i, hw_tx_prods) =
6094 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6095
6096 bnx2x_fp(bp, i, tx_prods_mapping) =
6097 bnx2x_fp(bp, i, status_blk_mapping) +
6098 sizeof(struct host_status_block);
6099
6100 /* fastpath tx rings: tx_buf tx_desc */
6101 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6102 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6103 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6104 &bnx2x_fp(bp, i, tx_desc_mapping),
6105 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6106 }
a2fbb9ea 6107 /* end of fastpath */
6108
6109 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6110 sizeof(struct host_def_status_block));
6111
6112 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6113 sizeof(struct bnx2x_slowpath));
6114
6115#ifdef BCM_ISCSI
6116 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6117
6118 /* Initialize T1 */
6119 for (i = 0; i < 64*1024; i += 64) {
6120 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6121 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6122 }
6123
6124 /* allocate searcher T2 table
6125 we allocate 1/4 of alloc num for T2
6126 (which is not entered into the ILT) */
6127 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6128
6129 /* Initialize T2 */
6130 for (i = 0; i < 16*1024; i += 64)
6131 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6132
c14423fe 6133 /* now fixup the last line in the block to point to the next block */
a2fbb9ea 6134 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6135
6136 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6137 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6138
6139 /* QM queues (128*MAX_CONN) */
6140 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6141#endif
6142
6143 /* Slow path ring */
6144 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6145
6146 return 0;
6147
6148alloc_mem_err:
6149 bnx2x_free_mem(bp);
6150 return -ENOMEM;
6151
6152#undef BNX2X_PCI_ALLOC
6153#undef BNX2X_ALLOC
6154}
6155
6156static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6157{
6158 int i;
6159
555f6c78 6160 for_each_tx_queue(bp, i) {
a2fbb9ea 6161 struct bnx2x_fastpath *fp = &bp->fp[i];
6162
6163 u16 bd_cons = fp->tx_bd_cons;
6164 u16 sw_prod = fp->tx_pkt_prod;
6165 u16 sw_cons = fp->tx_pkt_cons;
6166
a2fbb9ea 6167 while (sw_cons != sw_prod) {
6168 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6169 sw_cons++;
6170 }
6171 }
6172}
6173
6174static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6175{
6176 int i, j;
6177
555f6c78 6178 for_each_rx_queue(bp, j) {
a2fbb9ea 6179 struct bnx2x_fastpath *fp = &bp->fp[j];
6180
a2fbb9ea 6181 for (i = 0; i < NUM_RX_BD; i++) {
6182 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6183 struct sk_buff *skb = rx_buf->skb;
6184
6185 if (skb == NULL)
6186 continue;
6187
6188 pci_unmap_single(bp->pdev,
6189 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6190 bp->rx_buf_size,
a2fbb9ea 6191 PCI_DMA_FROMDEVICE);
6192
6193 rx_buf->skb = NULL;
6194 dev_kfree_skb(skb);
6195 }
7a9b2557 6196 if (!fp->disable_tpa)
32626230 6197 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6198 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6199 ETH_MAX_AGGREGATION_QUEUES_E1H);
a2fbb9ea 6200 }
6201}
6202
6203static void bnx2x_free_skbs(struct bnx2x *bp)
6204{
6205 bnx2x_free_tx_skbs(bp);
6206 bnx2x_free_rx_skbs(bp);
6207}
6208
6209static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6210{
34f80b04 6211 int i, offset = 1;
a2fbb9ea 6212
6213 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6214 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
a2fbb9ea 6215 bp->msix_table[0].vector);
6216
6217 for_each_queue(bp, i) {
c14423fe 6218 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6219 "state %x\n", i, bp->msix_table[i + offset].vector,
a2fbb9ea 6220 bnx2x_fp(bp, i, state));
6221
34f80b04 6222 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6223 }
a2fbb9ea 6224}
6225
6226static void bnx2x_free_irq(struct bnx2x *bp)
6227{
a2fbb9ea 6228 if (bp->flags & USING_MSIX_FLAG) {
a2fbb9ea 6229 bnx2x_free_msix_irqs(bp);
6230 pci_disable_msix(bp->pdev);
a2fbb9ea 6231 bp->flags &= ~USING_MSIX_FLAG;
6232
8badd27a 6233 } else if (bp->flags & USING_MSI_FLAG) {
6234 free_irq(bp->pdev->irq, bp->dev);
6235 pci_disable_msi(bp->pdev);
6236 bp->flags &= ~USING_MSI_FLAG;
6237
a2fbb9ea 6238 } else
6239 free_irq(bp->pdev->irq, bp->dev);
6240}
6241
6242static int bnx2x_enable_msix(struct bnx2x *bp)
6243{
8badd27a 6244 int i, rc, offset = 1;
6245 int igu_vec = 0;
a2fbb9ea 6246
8badd27a 6247 bp->msix_table[0].entry = igu_vec;
6248 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6249
34f80b04 6250 for_each_queue(bp, i) {
8badd27a 6251 igu_vec = BP_L_ID(bp) + offset + i;
34f80b04 6252 bp->msix_table[i + offset].entry = igu_vec;
6253 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6254 "(fastpath #%u)\n", i + offset, igu_vec, i);
a2fbb9ea 6255 }
6256
34f80b04 6257 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6258 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6259 if (rc) {
8badd27a 6260 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6261 return rc;
34f80b04 6262 }
8badd27a 6263
a2fbb9ea 6264 bp->flags |= USING_MSIX_FLAG;
6265
6266 return 0;
a2fbb9ea 6267}
6268
a2fbb9ea 6269static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6270{
34f80b04 6271 int i, rc, offset = 1;
a2fbb9ea 6272
a2fbb9ea 6273 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6274 bp->dev->name, bp->dev);
a2fbb9ea 6275 if (rc) {
6276 BNX2X_ERR("request sp irq failed\n");
6277 return -EBUSY;
6278 }
6279
6280 for_each_queue(bp, i) {
555f6c78 6281 struct bnx2x_fastpath *fp = &bp->fp[i];
6282
6283 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6284 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6285 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6286 if (rc) {
555f6c78 6287 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
a2fbb9ea 6288 bnx2x_free_msix_irqs(bp);
6289 return -EBUSY;
6290 }
6291
555f6c78 6292 fp->state = BNX2X_FP_STATE_IRQ;
a2fbb9ea 6293 }
6294
555f6c78 6295 i = BNX2X_NUM_QUEUES(bp);
6296 if (is_multi(bp))
6297 printk(KERN_INFO PFX
6298 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6299 bp->dev->name, bp->msix_table[0].vector,
6300 bp->msix_table[offset].vector,
6301 bp->msix_table[offset + i - 1].vector);
6302 else
6303 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6304 bp->dev->name, bp->msix_table[0].vector,
6305 bp->msix_table[offset + i - 1].vector);
6306
a2fbb9ea 6307 return 0;
a2fbb9ea 6308}
6309
8badd27a 6310static int bnx2x_enable_msi(struct bnx2x *bp)
6311{
6312 int rc;
6313
6314 rc = pci_enable_msi(bp->pdev);
6315 if (rc) {
6316 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6317 return -1;
6318 }
6319 bp->flags |= USING_MSI_FLAG;
6320
6321 return 0;
6322}
6323
a2fbb9ea 6324static int bnx2x_req_irq(struct bnx2x *bp)
6325{
8badd27a 6326 unsigned long flags;
34f80b04 6327 int rc;
a2fbb9ea 6328
8badd27a 6329 if (bp->flags & USING_MSI_FLAG)
6330 flags = 0;
6331 else
6332 flags = IRQF_SHARED;
6333
6334 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6335 bp->dev->name, bp->dev);
a2fbb9ea 6336 if (!rc)
6337 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6338
6339 return rc;
a2fbb9ea 6340}
6341
65abd74d 6342static void bnx2x_napi_enable(struct bnx2x *bp)
6343{
6344 int i;
6345
555f6c78 6346 for_each_rx_queue(bp, i)
65abd74d 6347 napi_enable(&bnx2x_fp(bp, i, napi));
6348}
6349
6350static void bnx2x_napi_disable(struct bnx2x *bp)
6351{
6352 int i;
6353
555f6c78 6354 for_each_rx_queue(bp, i)
65abd74d 6355 napi_disable(&bnx2x_fp(bp, i, napi));
6356}
6357
6358static void bnx2x_netif_start(struct bnx2x *bp)
6359{
6360 if (atomic_dec_and_test(&bp->intr_sem)) {
6361 if (netif_running(bp->dev)) {
65abd74d 6362 bnx2x_napi_enable(bp);
6363 bnx2x_int_enable(bp);
555f6c78 6364 if (bp->state == BNX2X_STATE_OPEN)
6365 netif_tx_wake_all_queues(bp->dev);
65abd74d 6366 }
6367 }
6368}
6369
f8ef6e44 6370static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6371{
f8ef6e44 6372 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6373 bnx2x_napi_disable(bp);
65abd74d 6374 if (netif_running(bp->dev)) {
65abd74d 6375 netif_tx_disable(bp->dev);
6376 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6377 }
6378}
6379
a2fbb9ea 6380/*
6381 * Init service functions
6382 */
6383
3101c2bc 6384static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
a2fbb9ea 6385{
6386 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6387 int port = BP_PORT(bp);
a2fbb9ea 6388
6389 /* CAM allocation
6390 * unicasts 0-31:port0 32-63:port1
6391 * multicast 64-127:port0 128-191:port1
6392 */
8d9c5f34 6393 config->hdr.length = 2;
af246401 6394 config->hdr.offset = port ? 32 : 0;
34f80b04 6395 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea 6396 config->hdr.reserved1 = 0;
6397
6398 /* primary MAC */
6399 config->config_table[0].cam_entry.msb_mac_addr =
6400 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6401 config->config_table[0].cam_entry.middle_mac_addr =
6402 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6403 config->config_table[0].cam_entry.lsb_mac_addr =
6404 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6405 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
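	/* Editor's note: on a little-endian host the swab16() packing above
	 * turns MAC 00:11:22:33:44:55 into msb 0x0011, middle 0x2233,
	 * lsb 0x4455, i.e. big-endian 16-bit words for the CAM */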
3101c2bc 6406 if (set)
6407 config->config_table[0].target_table_entry.flags = 0;
6408 else
6409 CAM_INVALIDATE(config->config_table[0]);
a2fbb9ea 6410 config->config_table[0].target_table_entry.client_id = 0;
6411 config->config_table[0].target_table_entry.vlan_id = 0;
6412
3101c2bc 6413 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6414 (set ? "setting" : "clearing"),
a2fbb9ea 6415 config->config_table[0].cam_entry.msb_mac_addr,
6416 config->config_table[0].cam_entry.middle_mac_addr,
6417 config->config_table[0].cam_entry.lsb_mac_addr);
6418
6419 /* broadcast */
6420 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6421 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6422 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6423 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
3101c2bc 6424 if (set)
6425 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6426 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
3101c2bc 6427 else
6428 CAM_INVALIDATE(config->config_table[1]);
a2fbb9ea 6429 config->config_table[1].target_table_entry.client_id = 0;
6430 config->config_table[1].target_table_entry.vlan_id = 0;
6431
6432 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6433 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6434 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6435}
6436
3101c2bc 6437static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
34f80b04 6438{
6439 struct mac_configuration_cmd_e1h *config =
6440 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6441
3101c2bc 6442 if (set && (bp->state != BNX2X_STATE_OPEN)) {
34f80b04 6443 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6444 return;
6445 }
6446
6447 /* CAM allocation for E1H
6448 * unicasts: by func number
6449 * multicast: 20+FUNC*20, 20 each
6450 */
8d9c5f34 6451 config->hdr.length = 1;
34f80b04 6452 config->hdr.offset = BP_FUNC(bp);
6453 config->hdr.client_id = BP_CL_ID(bp);
6454 config->hdr.reserved1 = 0;
6455
6456 /* primary MAC */
6457 config->config_table[0].msb_mac_addr =
6458 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6459 config->config_table[0].middle_mac_addr =
6460 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6461 config->config_table[0].lsb_mac_addr =
6462 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6463 config->config_table[0].client_id = BP_L_ID(bp);
6464 config->config_table[0].vlan_id = 0;
6465 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
3101c2bc 6466 if (set)
6467 config->config_table[0].flags = BP_PORT(bp);
6468 else
6469 config->config_table[0].flags =
6470 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6471
3101c2bc 6472 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6473 (set ? "setting" : "clearing"),
34f80b04 6474 config->config_table[0].msb_mac_addr,
6475 config->config_table[0].middle_mac_addr,
6476 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6477
6478 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6479 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6480 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6481}
6482
a2fbb9ea 6483static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6484 int *state_p, int poll)
6485{
6486 /* can take a while if any port is running */
34f80b04 6487 int cnt = 500;
a2fbb9ea 6488
c14423fe 6489 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6490 poll ? "polling" : "waiting", state, idx);
a2fbb9ea 6491
6492 might_sleep();
34f80b04 6493 while (cnt--) {
a2fbb9ea 6494 if (poll) {
6495 bnx2x_rx_int(bp->fp, 10);
34f80b04 6496 /* if index is different from 0
6497 * the reply for some commands will
3101c2bc 6498 * be on the non default queue
a2fbb9ea 6499 */
6500 if (idx)
6501 bnx2x_rx_int(&bp->fp[idx], 10);
6502 }
a2fbb9ea 6503
3101c2bc 6504 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6505 if (*state_p == state)
a2fbb9ea 6506 return 0;
6507
a2fbb9ea 6508 msleep(1);
a2fbb9ea 6509 }
6510
a2fbb9ea 6511 /* timeout! */
49d66772 6512 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6513 poll ? "polling" : "waiting", state, idx);
34f80b04 6514#ifdef BNX2X_STOP_ON_ERROR
6515 bnx2x_panic();
6516#endif
a2fbb9ea 6517
49d66772 6518 return -EBUSY;
a2fbb9ea 6519}
6520
6521static int bnx2x_setup_leading(struct bnx2x *bp)
6522{
34f80b04 6523 int rc;
a2fbb9ea 6524
c14423fe 6525 /* reset IGU state */
34f80b04 6526 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6527
6528 /* SETUP ramrod */
6529 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6530
34f80b04 6531 /* Wait for completion */
6532 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6533
34f80b04 6534 return rc;
a2fbb9ea 6535}
6536
6537static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6538{
555f6c78 6539 struct bnx2x_fastpath *fp = &bp->fp[index];
6540
a2fbb9ea 6541 /* reset IGU state */
555f6c78 6542 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
a2fbb9ea 6543
228241eb 6544 /* SETUP ramrod */
555f6c78 6545 fp->state = BNX2X_FP_STATE_OPENING;
6546 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6547 fp->cl_id, 0);
a2fbb9ea 6548
6549 /* Wait for completion */
6550 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
555f6c78 6551 &(fp->state), 0);
a2fbb9ea 6552}
6553
a2fbb9ea 6554static int bnx2x_poll(struct napi_struct *napi, int budget);
a2fbb9ea 6555
8badd27a 6556static void bnx2x_set_int_mode(struct bnx2x *bp)
a2fbb9ea 6557{
555f6c78 6558 int num_queues;
a2fbb9ea 6559
8badd27a 6560 switch (int_mode) {
6561 case INT_MODE_INTx:
6562 case INT_MODE_MSI:
555f6c78 6563 num_queues = 1;
6564 bp->num_rx_queues = num_queues;
6565 bp->num_tx_queues = num_queues;
6566 DP(NETIF_MSG_IFUP,
6567 "set number of queues to %d\n", num_queues);
8badd27a 6568 break;
6569
6570 case INT_MODE_MSIX:
6571 default:
555f6c78 6572 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6573 num_queues = min_t(u32, num_online_cpus(),
6574 BNX2X_MAX_QUEUES(bp));
34f80b04 6575 else
555f6c78 6576 num_queues = 1;
6577 bp->num_rx_queues = num_queues;
6578 bp->num_tx_queues = num_queues;
6579 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6580 " number of tx queues to %d\n",
6581 bp->num_rx_queues, bp->num_tx_queues);
2dfe0e1f 6582 /* if we can't use MSI-X we only need one fp,
6583 * so try to enable MSI-X with the requested number of fp's
6584 * and fallback to MSI or legacy INTx with one fp
6585 */
8badd27a 6586 if (bnx2x_enable_msix(bp)) {
34f80b04 6587 /* failed to enable MSI-X */
555f6c78 6588 num_queues = 1;
6589 bp->num_rx_queues = num_queues;
6590 bp->num_tx_queues = num_queues;
6591 if (bp->multi_mode)
6592 BNX2X_ERR("Multi requested but failed to "
6593 "enable MSI-X set number of "
6594 "queues to %d\n", num_queues);
a2fbb9ea 6595 }
8badd27a 6596 break;
a2fbb9ea 6597 }
555f6c78 6598 bp->dev->real_num_tx_queues = bp->num_tx_queues;
8badd27a 6599}
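/* Editor's sketch of the resulting configuration, assuming an 8-CPU
 * host and multi_mode == ETH_RSS_MODE_REGULAR:
 *	INT#x / MSI : 1 rx queue, 1 tx queue
 *	MSI-X       : min(num_online_cpus(), BNX2X_MAX_QUEUES(bp)) each,
 *	              falling back to 1/1 if vector allocation fails
 */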
6600
6601static void bnx2x_set_rx_mode(struct net_device *dev);
6602
6603/* must be called with rtnl_lock */
6604static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6605{
6606 u32 load_code;
6607 int i, rc = 0;
6608#ifdef BNX2X_STOP_ON_ERROR
6609 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6610 if (unlikely(bp->panic))
6611 return -EPERM;
6612#endif
6613
6614 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6615
6616 bnx2x_set_int_mode(bp);
c14423fe 6617
a2fbb9ea 6618 if (bnx2x_alloc_mem(bp))
6619 return -ENOMEM;
6620
555f6c78 6621 for_each_rx_queue(bp, i)
7a9b2557 6622 bnx2x_fp(bp, i, disable_tpa) =
6623 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6624
555f6c78 6625 for_each_rx_queue(bp, i)
2dfe0e1f 6626 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6627 bnx2x_poll, 128);
6628
6629#ifdef BNX2X_STOP_ON_ERROR
555f6c78 6630 for_each_rx_queue(bp, i) {
2dfe0e1f 6631 struct bnx2x_fastpath *fp = &bp->fp[i];
6632
6633 fp->poll_no_work = 0;
6634 fp->poll_calls = 0;
6635 fp->poll_max_calls = 0;
6636 fp->poll_complete = 0;
6637 fp->poll_exit = 0;
6638 }
6639#endif
6640 bnx2x_napi_enable(bp);
6641
34f80b04 6642 if (bp->flags & USING_MSIX_FLAG) {
6643 rc = bnx2x_req_msix_irqs(bp);
6644 if (rc) {
6645 pci_disable_msix(bp->pdev);
2dfe0e1f 6646 goto load_error1;
34f80b04 6647 }
6648 } else {
8badd27a 6649 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6650 bnx2x_enable_msi(bp);
34f80b04 6651 bnx2x_ack_int(bp);
6652 rc = bnx2x_req_irq(bp);
6653 if (rc) {
2dfe0e1f 6654 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
8badd27a 6655 if (bp->flags & USING_MSI_FLAG)
6656 pci_disable_msi(bp->pdev);
2dfe0e1f 6657 goto load_error1;
a2fbb9ea 6658 }
8badd27a 6659 if (bp->flags & USING_MSI_FLAG) {
6660 bp->dev->irq = bp->pdev->irq;
6661 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6662 bp->dev->name, bp->pdev->irq);
6663 }
a2fbb9ea 6664 }
6665
2dfe0e1f
EG
6666 /* Send LOAD_REQUEST command to the MCP.
6667 The response indicates the type of LOAD command:
6668 if this is the first port to be initialized,
6669 the common blocks must be initialized as well; otherwise not
6670 */
6671 if (!BP_NOMCP(bp)) {
6672 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
6673 if (!load_code) {
6674 BNX2X_ERR("MCP response failure, aborting\n");
6675 rc = -EBUSY;
6676 goto load_error2;
6677 }
6678 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
6679 rc = -EBUSY; /* other port in diagnostic mode */
6680 goto load_error2;
6681 }
6682
6683 } else {
6684 int port = BP_PORT(bp);
6685
6686 DP(NETIF_MSG_IFUP, "NO MCP load counts before us %d, %d, %d\n",
6687 load_count[0], load_count[1], load_count[2]);
6688 load_count[0]++;
6689 load_count[1 + port]++;
6690 DP(NETIF_MSG_IFUP, "NO MCP new load counts %d, %d, %d\n",
6691 load_count[0], load_count[1], load_count[2]);
6692 if (load_count[0] == 1)
6693 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
6694 else if (load_count[1 + port] == 1)
6695 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
6696 else
6697 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
6698 }
6699
6700 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
6701 (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
6702 bp->port.pmf = 1;
6703 else
6704 bp->port.pmf = 0;
6705 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
a2fbb9ea 6706
a2fbb9ea 6707 /* Initialize HW */
34f80b04
EG
6708 rc = bnx2x_init_hw(bp, load_code);
6709 if (rc) {
a2fbb9ea 6710 BNX2X_ERR("HW init failed, aborting\n");
2dfe0e1f 6711 goto load_error2;
a2fbb9ea
ET
6712 }
6713
a2fbb9ea 6714 /* Setup NIC internals and enable interrupts */
471de716 6715 bnx2x_nic_init(bp, load_code);
a2fbb9ea
ET
6716
6717 /* Send LOAD_DONE command to MCP */
34f80b04 6718 if (!BP_NOMCP(bp)) {
228241eb
ET
6719 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
6720 if (!load_code) {
da5a662a 6721 BNX2X_ERR("MCP response failure, aborting\n");
34f80b04 6722 rc = -EBUSY;
2dfe0e1f 6723 goto load_error3;
a2fbb9ea
ET
6724 }
6725 }
6726
6727 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
6728
34f80b04
EG
6729 rc = bnx2x_setup_leading(bp);
6730 if (rc) {
da5a662a 6731 BNX2X_ERR("Setup leading failed!\n");
2dfe0e1f 6732 goto load_error3;
34f80b04 6733 }
a2fbb9ea 6734
34f80b04
EG
6735 if (CHIP_IS_E1H(bp))
6736 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
6737 BNX2X_ERR("!!! mf_cfg function disabled\n");
6738 bp->state = BNX2X_STATE_DISABLED;
6739 }
a2fbb9ea 6740
34f80b04
EG
6741 if (bp->state == BNX2X_STATE_OPEN)
6742 for_each_nondefault_queue(bp, i) {
6743 rc = bnx2x_setup_multi(bp, i);
6744 if (rc)
2dfe0e1f 6745 goto load_error3;
34f80b04 6746 }
a2fbb9ea 6747
34f80b04 6748 if (CHIP_IS_E1(bp))
3101c2bc 6749 bnx2x_set_mac_addr_e1(bp, 1);
34f80b04 6750 else
3101c2bc 6751 bnx2x_set_mac_addr_e1h(bp, 1);
34f80b04
EG
6752
6753 if (bp->port.pmf)
6754 bnx2x_initial_phy_init(bp);
a2fbb9ea
ET
6755
6756 /* Start fast path */
34f80b04
EG
6757 switch (load_mode) {
6758 case LOAD_NORMAL:
6759 /* Tx queues should only be re-enabled */
555f6c78 6760 netif_tx_wake_all_queues(bp->dev);
2dfe0e1f 6761 /* Initialize the receive filter. */
34f80b04
EG
6762 bnx2x_set_rx_mode(bp->dev);
6763 break;
6764
6765 case LOAD_OPEN:
555f6c78 6766 netif_tx_start_all_queues(bp->dev);
2dfe0e1f 6767 /* Initialize the receive filter. */
34f80b04 6768 bnx2x_set_rx_mode(bp->dev);
34f80b04 6769 break;
a2fbb9ea 6770
34f80b04 6771 case LOAD_DIAG:
2dfe0e1f 6772 /* Initialize the receive filter. */
a2fbb9ea 6773 bnx2x_set_rx_mode(bp->dev);
34f80b04
EG
6774 bp->state = BNX2X_STATE_DIAG;
6775 break;
6776
6777 default:
6778 break;
a2fbb9ea
ET
6779 }
6780
34f80b04
EG
6781 if (!bp->port.pmf)
6782 bnx2x__link_status_update(bp);
6783
a2fbb9ea
ET
6784 /* start the timer */
6785 mod_timer(&bp->timer, jiffies + bp->current_interval);
6786
34f80b04 6787
a2fbb9ea
ET
6788 return 0;
6789
2dfe0e1f
EG
6790load_error3:
6791 bnx2x_int_disable_sync(bp, 1);
6792 if (!BP_NOMCP(bp)) {
6793 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
6794 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
6795 }
6796 bp->port.pmf = 0;
7a9b2557
VZ
6797 /* Free SKBs, SGEs, TPA pool and driver internals */
6798 bnx2x_free_skbs(bp);
555f6c78 6799 for_each_rx_queue(bp, i)
3196a88a 6800 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
2dfe0e1f 6801load_error2:
d1014634
YG
6802 /* Release IRQs */
6803 bnx2x_free_irq(bp);
2dfe0e1f
EG
6804load_error1:
6805 bnx2x_napi_disable(bp);
555f6c78 6806 for_each_rx_queue(bp, i)
7cde1c8b 6807 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
6808 bnx2x_free_mem(bp);
6809
6810 /* TBD we really need to reset the chip
6811 if we want to recover from this */
34f80b04 6812 return rc;
a2fbb9ea
ET
6813}
6814
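When the MCP is absent, bnx2x_nic_load() above keeps its own bookkeeping in load_count[]: index 0 counts loads on the whole chip and index 1 + port counts loads per port, so the first load overall gets COMMON init, the first on a given port gets PORT init, and anything later only FUNCTION init (bnx2x_nic_unload() decrements the same counters). A standalone model of that accounting, with illustrative names:

/* sketch: the no-MCP load accounting, modeled outside the driver */
#include <stdio.h>

enum load_code { LOAD_COMMON, LOAD_PORT, LOAD_FUNCTION };
static int load_count[3];	/* 0-common, 1-port0, 2-port1 */

static enum load_code nomcp_load(int port)
{
	load_count[0]++;
	load_count[1 + port]++;
	if (load_count[0] == 1)
		return LOAD_COMMON;
	if (load_count[1 + port] == 1)
		return LOAD_PORT;
	return LOAD_FUNCTION;
}

int main(void)
{
	printf("%d %d %d\n", nomcp_load(0), nomcp_load(1), nomcp_load(0));
	/* prints "0 1 2": COMMON, then PORT, then FUNCTION */
	return 0;
}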
6815static int bnx2x_stop_multi(struct bnx2x *bp, int index)
6816{
555f6c78 6817 struct bnx2x_fastpath *fp = &bp->fp[index];
a2fbb9ea
ET
6818 int rc;
6819
c14423fe 6820 /* halt the connection */
555f6c78
EG
6821 fp->state = BNX2X_FP_STATE_HALTING;
6822 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
a2fbb9ea 6823
34f80b04 6824 /* Wait for completion */
a2fbb9ea 6825 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
555f6c78 6826 &(fp->state), 1);
c14423fe 6827 if (rc) /* timeout */
a2fbb9ea
ET
6828 return rc;
6829
6830 /* delete cfc entry */
6831 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
6832
34f80b04
EG
6833 /* Wait for completion */
6834 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
555f6c78 6835 &(fp->state), 1);
34f80b04 6836 return rc;
a2fbb9ea
ET
6837}
6838
da5a662a 6839static int bnx2x_stop_leading(struct bnx2x *bp)
a2fbb9ea 6840{
49d66772 6841 u16 dsb_sp_prod_idx;
c14423fe 6842 /* if the other port is handling traffic,
a2fbb9ea 6843 this can take a lot of time */
34f80b04
EG
6844 int cnt = 500;
6845 int rc;
a2fbb9ea
ET
6846
6847 might_sleep();
6848
6849 /* Send HALT ramrod */
6850 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
34f80b04 6851 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);
a2fbb9ea 6852
34f80b04
EG
6853 /* Wait for completion */
6854 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
6855 &(bp->fp[0].state), 1);
6856 if (rc) /* timeout */
da5a662a 6857 return rc;
a2fbb9ea 6858
49d66772 6859 dsb_sp_prod_idx = *bp->dsb_sp_prod;
a2fbb9ea 6860
228241eb 6861 /* Send PORT_DELETE ramrod */
a2fbb9ea
ET
6862 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
6863
49d66772 6864 /* Wait for completion to arrive on the default status block;
a2fbb9ea
ET
6865 we are going to reset the chip anyway,
6866 so there is not much to do if this times out
6867 */
34f80b04 6868 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
34f80b04
EG
6869 if (!cnt) {
6870 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
6871 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
6872 *bp->dsb_sp_prod, dsb_sp_prod_idx);
6873#ifdef BNX2X_STOP_ON_ERROR
6874 bnx2x_panic();
da5a662a
VZ
6875#else
6876 rc = -EBUSY;
34f80b04
EG
6877#endif
6878 break;
6879 }
6880 cnt--;
da5a662a 6881 msleep(1);
5650d9d4 6882 rmb(); /* Refresh the dsb_sp_prod */
49d66772
ET
6883 }
6884 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
6885 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
da5a662a
VZ
6886
6887 return rc;
a2fbb9ea
ET
6888}
6889
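bnx2x_stop_leading() waits for the PORT_DELETE completion by polling the default status block producer with a bounded try count and a 1 ms sleep per try, and gives up when the count runs out (the chip is about to be reset anyway). The same idiom as a generic userspace helper; poll_fn and the fake backend are stand-ins for the register read:

/* sketch: bounded polling for a producer index to move */
#include <stdio.h>
#include <unistd.h>

static int wait_for_change(unsigned short (*poll_fn)(void),
                           unsigned short old, int tries)
{
	while (poll_fn() == old) {
		if (!tries--)
			return -1;	/* timed out; caller resets anyway */
		usleep(1000);		/* the driver uses msleep(1) */
	}
	return 0;
}

static unsigned short idx;
static unsigned short fake_poll(void) { return ++idx; }

int main(void)
{
	printf("%d\n", wait_for_change(fake_poll, 0, 500)); /* 0: changed */
	return 0;
}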
34f80b04
EG
6890static void bnx2x_reset_func(struct bnx2x *bp)
6891{
6892 int port = BP_PORT(bp);
6893 int func = BP_FUNC(bp);
6894 int base, i;
6895
6896 /* Configure IGU */
6897 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6898 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6899
34f80b04
EG
6900 /* Clear ILT */
6901 base = FUNC_ILT_BASE(func);
6902 for (i = base; i < base + ILT_PER_FUNC; i++)
6903 bnx2x_ilt_wr(bp, i, 0);
6904}
6905
6906static void bnx2x_reset_port(struct bnx2x *bp)
6907{
6908 int port = BP_PORT(bp);
6909 u32 val;
6910
6911 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
6912
6913 /* Do not rcv packets to BRB */
6914 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
6915 /* Do not direct rcv packets that are not for MCP to the BRB */
6916 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
6917 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
6918
6919 /* Configure AEU */
6920 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
6921
6922 msleep(100);
6923 /* Check for BRB port occupancy */
6924 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
6925 if (val)
6926 DP(NETIF_MSG_IFDOWN,
33471629 6927 "BRB1 is not empty %d blocks are occupied\n", val);
34f80b04
EG
6928
6929 /* TODO: Close Doorbell port? */
6930}
6931
34f80b04
EG
6932static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
6933{
6934 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
6935 BP_FUNC(bp), reset_code);
6936
6937 switch (reset_code) {
6938 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
6939 bnx2x_reset_port(bp);
6940 bnx2x_reset_func(bp);
6941 bnx2x_reset_common(bp);
6942 break;
6943
6944 case FW_MSG_CODE_DRV_UNLOAD_PORT:
6945 bnx2x_reset_port(bp);
6946 bnx2x_reset_func(bp);
6947 break;
6948
6949 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
6950 bnx2x_reset_func(bp);
6951 break;
49d66772 6952
34f80b04
EG
6953 default:
6954 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
6955 break;
6956 }
6957}
6958
33471629 6959/* must be called with rtnl_lock */
34f80b04 6960static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
a2fbb9ea 6961{
da5a662a 6962 int port = BP_PORT(bp);
a2fbb9ea 6963 u32 reset_code = 0;
da5a662a 6964 int i, cnt, rc;
a2fbb9ea
ET
6965
6966 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
6967
228241eb
ET
6968 bp->rx_mode = BNX2X_RX_MODE_NONE;
6969 bnx2x_set_storm_rx_mode(bp);
a2fbb9ea 6970
f8ef6e44 6971 bnx2x_netif_stop(bp, 1);
e94d8af3 6972
34f80b04
EG
6973 del_timer_sync(&bp->timer);
6974 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
6975 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
bb2a0f7a 6976 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04 6977
70b9986c
EG
6978 /* Release IRQs */
6979 bnx2x_free_irq(bp);
6980
555f6c78
EG
6981 /* Wait until tx fastpath tasks complete */
6982 for_each_tx_queue(bp, i) {
228241eb
ET
6983 struct bnx2x_fastpath *fp = &bp->fp[i];
6984
34f80b04
EG
6985 cnt = 1000;
6986 smp_rmb();
e8b5fc51 6987 while (bnx2x_has_tx_work_unload(fp)) {
da5a662a 6988
65abd74d 6989 bnx2x_tx_int(fp, 1000);
34f80b04
EG
6990 if (!cnt) {
6991 BNX2X_ERR("timeout waiting for queue[%d]\n",
6992 i);
6993#ifdef BNX2X_STOP_ON_ERROR
6994 bnx2x_panic();
6995 return -EBUSY;
6996#else
6997 break;
6998#endif
6999 }
7000 cnt--;
da5a662a 7001 msleep(1);
34f80b04
EG
7002 smp_rmb();
7003 }
228241eb 7004 }
da5a662a
VZ
7005 /* Give HW time to discard old tx messages */
7006 msleep(1);
a2fbb9ea 7007
3101c2bc
YG
7008 if (CHIP_IS_E1(bp)) {
7009 struct mac_configuration_cmd *config =
7010 bnx2x_sp(bp, mcast_config);
7011
7012 bnx2x_set_mac_addr_e1(bp, 0);
7013
8d9c5f34 7014 for (i = 0; i < config->hdr.length; i++)
3101c2bc
YG
7015 CAM_INVALIDATE(config->config_table[i]);
7016
8d9c5f34 7017 config->hdr.length = i;
3101c2bc
YG
7018 if (CHIP_REV_IS_SLOW(bp))
7019 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7020 else
7021 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7022 config->hdr.client_id = BP_CL_ID(bp);
7023 config->hdr.reserved1 = 0;
7024
7025 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7026 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7027 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7028
7029 } else { /* E1H */
65abd74d
YG
7030 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7031
3101c2bc
YG
7032 bnx2x_set_mac_addr_e1h(bp, 0);
7033
7034 for (i = 0; i < MC_HASH_SIZE; i++)
7035 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7036 }
7037
65abd74d
YG
7038 if (unload_mode == UNLOAD_NORMAL)
7039 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7040
7041 else if (bp->flags & NO_WOL_FLAG) {
7042 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7043 if (CHIP_IS_E1H(bp))
7044 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7045
7046 } else if (bp->wol) {
7047 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7048 u8 *mac_addr = bp->dev->dev_addr;
7049 u32 val;
7050 /* The MAC address is written to entries 1-4 to
7051 preserve entry 0, which is used by the PMF */
7052 u8 entry = (BP_E1HVN(bp) + 1)*8;
7053
7054 val = (mac_addr[0] << 8) | mac_addr[1];
7055 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7056
7057 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7058 (mac_addr[4] << 8) | mac_addr[5];
7059 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7060
7061 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7062
7063 } else
7064 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7065
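The WoL branch above splits the six MAC bytes across two 32-bit EMAC_REG_EMAC_MAC_MATCH words: bytes 0-1 in the high word, bytes 2-5 in the low word. A standalone sketch of that packing with a made-up example address:

/* sketch: MAC byte packing for the EMAC match registers */
#include <stdio.h>

int main(void)
{
	unsigned char mac[6] = { 0x00, 0x10, 0x18, 0xab, 0xcd, 0xef };
	unsigned int hi = (mac[0] << 8) | mac[1];
	unsigned int lo = (mac[2] << 24) | (mac[3] << 16) |
			  (mac[4] << 8) | mac[5];

	printf("match hi 0x%04x lo 0x%08x\n", hi, lo);
	return 0;
}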
34f80b04
EG
7066 /* Close multi and leading connections
7067 Completions for ramrods are collected in a synchronous way */
a2fbb9ea
ET
7068 for_each_nondefault_queue(bp, i)
7069 if (bnx2x_stop_multi(bp, i))
228241eb 7070 goto unload_error;
a2fbb9ea 7071
da5a662a
VZ
7072 rc = bnx2x_stop_leading(bp);
7073 if (rc) {
34f80b04 7074 BNX2X_ERR("Stop leading failed!\n");
da5a662a 7075#ifdef BNX2X_STOP_ON_ERROR
34f80b04 7076 return -EBUSY;
da5a662a
VZ
7077#else
7078 goto unload_error;
34f80b04 7079#endif
228241eb
ET
7080 }
7081
7082unload_error:
34f80b04 7083 if (!BP_NOMCP(bp))
228241eb 7084 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7085 else {
7086 DP(NETIF_MSG_IFDOWN, "NO MCP load counts %d, %d, %d\n",
7087 load_count[0], load_count[1], load_count[2]);
7088 load_count[0]--;
da5a662a 7089 load_count[1 + port]--;
34f80b04
EG
7090 DP(NETIF_MSG_IFDOWN, "NO MCP new load counts %d, %d, %d\n",
7091 load_count[0], load_count[1], load_count[2]);
7092 if (load_count[0] == 0)
7093 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
da5a662a 7094 else if (load_count[1 + port] == 0)
34f80b04
EG
7095 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7096 else
7097 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7098 }
a2fbb9ea 7099
34f80b04
EG
7100 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7101 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7102 bnx2x__link_reset(bp);
a2fbb9ea
ET
7103
7104 /* Reset the chip */
228241eb 7105 bnx2x_reset_chip(bp, reset_code);
a2fbb9ea
ET
7106
7107 /* Report UNLOAD_DONE to MCP */
34f80b04 7108 if (!BP_NOMCP(bp))
a2fbb9ea 7109 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
9a035440 7110 bp->port.pmf = 0;
a2fbb9ea 7111
7a9b2557 7112 /* Free SKBs, SGEs, TPA pool and driver internals */
a2fbb9ea 7113 bnx2x_free_skbs(bp);
555f6c78 7114 for_each_rx_queue(bp, i)
3196a88a 7115 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
555f6c78 7116 for_each_rx_queue(bp, i)
7cde1c8b 7117 netif_napi_del(&bnx2x_fp(bp, i, napi));
a2fbb9ea
ET
7118 bnx2x_free_mem(bp);
7119
7120 bp->state = BNX2X_STATE_CLOSED;
228241eb 7121
a2fbb9ea
ET
7122 netif_carrier_off(bp->dev);
7123
7124 return 0;
7125}
7126
34f80b04
EG
7127static void bnx2x_reset_task(struct work_struct *work)
7128{
7129 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7130
7131#ifdef BNX2X_STOP_ON_ERROR
7132 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7133 " so reset not done to allow debug dump,\n"
7134 KERN_ERR " you will need to reboot when done\n");
7135 return;
7136#endif
7137
7138 rtnl_lock();
7139
7140 if (!netif_running(bp->dev))
7141 goto reset_task_exit;
7142
7143 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7144 bnx2x_nic_load(bp, LOAD_NORMAL);
7145
7146reset_task_exit:
7147 rtnl_unlock();
7148}
7149
a2fbb9ea
ET
7150/* end of nic load/unload */
7151
7152/* ethtool_ops */
7153
7154/*
7155 * Init service functions
7156 */
7157
f1ef27ef
EG
7158static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7159{
7160 switch (func) {
7161 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7162 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7163 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7164 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7165 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7166 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7167 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7168 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7169 default:
7170 BNX2X_ERR("Unsupported function index: %d\n", func);
7171 return (u32)(-1);
7172 }
7173}
7174
7175static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7176{
7177 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7178
7179 /* Flush all outstanding writes */
7180 mmiowb();
7181
7182 /* Pretend to be function 0 */
7183 REG_WR(bp, reg, 0);
7184 /* Flush the GRC transaction (in the chip) */
7185 new_val = REG_RD(bp, reg);
7186 if (new_val != 0) {
7187 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7188 new_val);
7189 BUG();
7190 }
7191
7192 /* From now we are in the "like-E1" mode */
7193 bnx2x_int_disable(bp);
7194
7195 /* Flush all outstanding writes */
7196 mmiowb();
7197
7198 /* Restore the original function settings */
7199 REG_WR(bp, reg, orig_func);
7200 new_val = REG_RD(bp, reg);
7201 if (new_val != orig_func) {
7202 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7203 orig_func, new_val);
7204 BUG();
7205 }
7206}
7207
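bnx2x_undi_int_disable_e1h() never trusts a write to the pretend register: it reads the value back, which both flushes the GRC transaction and proves the write landed, and BUG()s on a mismatch. The same write-then-verify pattern as a tiny userspace sketch; reg_wr/reg_rd are stand-ins for REG_WR/REG_RD:

/* sketch: write-then-read-back verification */
#include <assert.h>
#include <stdio.h>

static unsigned int fake_reg;
static void reg_wr(unsigned int val) { fake_reg = val; }
static unsigned int reg_rd(void) { return fake_reg; }

static void pretend_func(unsigned int func)
{
	reg_wr(func);
	/* reading back flushes the write and proves it took effect */
	assert(reg_rd() == func);
}

int main(void)
{
	pretend_func(0);	/* act as function 0 */
	pretend_func(5);	/* restore the original function */
	printf("ok\n");
	return 0;
}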
7208static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7209{
7210 if (CHIP_IS_E1H(bp))
7211 bnx2x_undi_int_disable_e1h(bp, func);
7212 else
7213 bnx2x_int_disable(bp);
7214}
7215
34f80b04
EG
7216static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7217{
7218 u32 val;
7219
7220 /* Check if there is any driver already loaded */
7221 val = REG_RD(bp, MISC_REG_UNPREPARED);
7222 if (val == 0x1) {
7223 /* Check if it is the UNDI driver:
7224 * the UNDI driver initializes the CID offset for the normal doorbell to 0x7
7225 */
4a37fb66 7226 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7227 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7228 if (val == 0x7) {
7229 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
da5a662a 7230 /* save our func */
34f80b04 7231 int func = BP_FUNC(bp);
da5a662a
VZ
7232 u32 swap_en;
7233 u32 swap_val;
34f80b04 7234
b4661739
EG
7235 /* clear the UNDI indication */
7236 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7237
34f80b04
EG
7238 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7239
7240 /* try unload UNDI on port 0 */
7241 bp->func = 0;
da5a662a
VZ
7242 bp->fw_seq =
7243 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7244 DRV_MSG_SEQ_NUMBER_MASK);
34f80b04 7245 reset_code = bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7246
7247 /* if UNDI is loaded on the other port */
7248 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7249
da5a662a
VZ
7250 /* send "DONE" for previous unload */
7251 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7252
7253 /* unload UNDI on port 1 */
34f80b04 7254 bp->func = 1;
da5a662a
VZ
7255 bp->fw_seq =
7256 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7257 DRV_MSG_SEQ_NUMBER_MASK);
7258 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7259
7260 bnx2x_fw_command(bp, reset_code);
34f80b04
EG
7261 }
7262
b4661739
EG
7263 /* now it's safe to release the lock */
7264 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7265
f1ef27ef 7266 bnx2x_undi_int_disable(bp, func);
da5a662a
VZ
7267
7268 /* close input traffic and wait for it */
7269 /* Do not rcv packets to BRB */
7270 REG_WR(bp,
7271 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7272 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7273 /* Do not direct rcv packets that are not for MCP to
7274 * the BRB */
7275 REG_WR(bp,
7276 (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7277 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7278 /* clear AEU */
7279 REG_WR(bp,
7280 (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7281 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7282 msleep(10);
7283
7284 /* save NIG port swap info */
7285 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7286 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
34f80b04
EG
7287 /* reset device */
7288 REG_WR(bp,
7289 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
da5a662a 7290 0xd3ffffff);
34f80b04
EG
7291 REG_WR(bp,
7292 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7293 0x1403);
da5a662a
VZ
7294 /* take the NIG out of reset and restore swap values */
7295 REG_WR(bp,
7296 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7297 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7298 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7299 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7300
7301 /* send unload done to the MCP */
7302 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7303
7304 /* restore our func and fw_seq */
7305 bp->func = func;
7306 bp->fw_seq =
7307 (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7308 DRV_MSG_SEQ_NUMBER_MASK);
b4661739
EG
7309
7310 } else
7311 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
34f80b04
EG
7312 }
7313}
7314
7315static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7316{
7317 u32 val, val2, val3, val4, id;
72ce58c3 7318 u16 pmc;
34f80b04
EG
7319
7320 /* Get the chip revision id and number. */
7321 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7322 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7323 id = ((val & 0xffff) << 16);
7324 val = REG_RD(bp, MISC_REG_CHIP_REV);
7325 id |= ((val & 0xf) << 12);
7326 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7327 id |= ((val & 0xff) << 4);
5a40e08e 7328 val = REG_RD(bp, MISC_REG_BOND_ID);
34f80b04
EG
7329 id |= (val & 0xf);
7330 bp->common.chip_id = id;
7331 bp->link_params.chip_id = bp->common.chip_id;
7332 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7333
7334 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7335 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7336 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7337 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7338 bp->common.flash_size, bp->common.flash_size);
7339
7340 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7341 bp->link_params.shmem_base = bp->common.shmem_base;
7342 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7343
7344 if (!bp->common.shmem_base ||
7345 (bp->common.shmem_base < 0xA0000) ||
7346 (bp->common.shmem_base >= 0xC0000)) {
7347 BNX2X_DEV_INFO("MCP not active\n");
7348 bp->flags |= NO_MCP_FLAG;
7349 return;
7350 }
7351
7352 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7353 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7354 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7355 BNX2X_ERR("BAD MCP validity signature\n");
7356
7357 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7358 bp->common.board = SHMEM_RD(bp, dev_info.shared_hw_config.board);
7359
7360 BNX2X_DEV_INFO("hw_config 0x%08x board 0x%08x\n",
7361 bp->common.hw_config, bp->common.board);
7362
7363 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7364 SHARED_HW_CFG_LED_MODE_MASK) >>
7365 SHARED_HW_CFG_LED_MODE_SHIFT);
7366
7367 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7368 bp->common.bc_ver = val;
7369 BNX2X_DEV_INFO("bc_ver %X\n", val);
7370 if (val < BNX2X_BC_VER) {
7371 /* for now, only warn;
7372 * later we might need to enforce this */
7373 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7374 " please upgrade BC\n", BNX2X_BC_VER, val);
7375 }
72ce58c3
EG
7376
7377 if (BP_E1HVN(bp) == 0) {
7378 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7379 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7380 } else {
7381 /* no WOL capability for E1HVN != 0 */
7382 bp->flags |= NO_WOL_FLAG;
7383 }
7384 BNX2X_DEV_INFO("%sWoL capable\n",
7385 (bp->flags & NO_WOL_FLAG) ? "Not " : "");
34f80b04
EG
7386
7387 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7388 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7389 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7390 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7391
7392 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7393 val, val2, val3, val4);
7394}
7395
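The chip id assembled at the top of bnx2x_get_common_hwinfo() packs four register fields into one word (num: bits 16-31, rev: 12-15, metal: 4-11, bond_id: 0-3). A standalone sketch of the composition; the sample field values are made up:

/* sketch: composing the 32-bit chip id from its four fields */
#include <stdio.h>

static unsigned int make_chip_id(unsigned int num, unsigned int rev,
                                 unsigned int metal, unsigned int bond)
{
	return ((num & 0xffff) << 16) | ((rev & 0xf) << 12) |
	       ((metal & 0xff) << 4) | (bond & 0xf);
}

int main(void)
{
	printf("chip ID is 0x%x\n", make_chip_id(0x164e, 0x1, 0x00, 0x0));
	return 0;
}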
7396static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7397 u32 switch_cfg)
a2fbb9ea 7398{
34f80b04 7399 int port = BP_PORT(bp);
a2fbb9ea
ET
7400 u32 ext_phy_type;
7401
a2fbb9ea
ET
7402 switch (switch_cfg) {
7403 case SWITCH_CFG_1G:
7404 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7405
c18487ee
YR
7406 ext_phy_type =
7407 SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7408 switch (ext_phy_type) {
7409 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7410 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7411 ext_phy_type);
7412
34f80b04
EG
7413 bp->port.supported |= (SUPPORTED_10baseT_Half |
7414 SUPPORTED_10baseT_Full |
7415 SUPPORTED_100baseT_Half |
7416 SUPPORTED_100baseT_Full |
7417 SUPPORTED_1000baseT_Full |
7418 SUPPORTED_2500baseX_Full |
7419 SUPPORTED_TP |
7420 SUPPORTED_FIBRE |
7421 SUPPORTED_Autoneg |
7422 SUPPORTED_Pause |
7423 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7424 break;
7425
7426 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7427 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7428 ext_phy_type);
7429
34f80b04
EG
7430 bp->port.supported |= (SUPPORTED_10baseT_Half |
7431 SUPPORTED_10baseT_Full |
7432 SUPPORTED_100baseT_Half |
7433 SUPPORTED_100baseT_Full |
7434 SUPPORTED_1000baseT_Full |
7435 SUPPORTED_TP |
7436 SUPPORTED_FIBRE |
7437 SUPPORTED_Autoneg |
7438 SUPPORTED_Pause |
7439 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7440 break;
7441
7442 default:
7443 BNX2X_ERR("NVRAM config error. "
7444 "BAD SerDes ext_phy_config 0x%x\n",
c18487ee 7445 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7446 return;
7447 }
7448
34f80b04
EG
7449 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7450 port*0x10);
7451 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea
ET
7452 break;
7453
7454 case SWITCH_CFG_10G:
7455 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7456
c18487ee
YR
7457 ext_phy_type =
7458 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
a2fbb9ea
ET
7459 switch (ext_phy_type) {
7460 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7461 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7462 ext_phy_type);
7463
34f80b04
EG
7464 bp->port.supported |= (SUPPORTED_10baseT_Half |
7465 SUPPORTED_10baseT_Full |
7466 SUPPORTED_100baseT_Half |
7467 SUPPORTED_100baseT_Full |
7468 SUPPORTED_1000baseT_Full |
7469 SUPPORTED_2500baseX_Full |
7470 SUPPORTED_10000baseT_Full |
7471 SUPPORTED_TP |
7472 SUPPORTED_FIBRE |
7473 SUPPORTED_Autoneg |
7474 SUPPORTED_Pause |
7475 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7476 break;
7477
7478 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
f1410647 7479 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
34f80b04 7480 ext_phy_type);
f1410647 7481
34f80b04
EG
7482 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7483 SUPPORTED_FIBRE |
7484 SUPPORTED_Pause |
7485 SUPPORTED_Asym_Pause);
f1410647
ET
7486 break;
7487
a2fbb9ea 7488 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
f1410647
ET
7489 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7490 ext_phy_type);
7491
34f80b04
EG
7492 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7493 SUPPORTED_1000baseT_Full |
7494 SUPPORTED_FIBRE |
7495 SUPPORTED_Pause |
7496 SUPPORTED_Asym_Pause);
f1410647
ET
7497 break;
7498
7499 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7500 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
a2fbb9ea
ET
7501 ext_phy_type);
7502
34f80b04
EG
7503 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7504 SUPPORTED_1000baseT_Full |
7505 SUPPORTED_FIBRE |
7506 SUPPORTED_Autoneg |
7507 SUPPORTED_Pause |
7508 SUPPORTED_Asym_Pause);
f1410647
ET
7509 break;
7510
c18487ee
YR
7511 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7512 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7513 ext_phy_type);
7514
34f80b04
EG
7515 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7516 SUPPORTED_2500baseX_Full |
7517 SUPPORTED_1000baseT_Full |
7518 SUPPORTED_FIBRE |
7519 SUPPORTED_Autoneg |
7520 SUPPORTED_Pause |
7521 SUPPORTED_Asym_Pause);
c18487ee
YR
7522 break;
7523
f1410647
ET
7524 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7525 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7526 ext_phy_type);
7527
34f80b04
EG
7528 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7529 SUPPORTED_TP |
7530 SUPPORTED_Autoneg |
7531 SUPPORTED_Pause |
7532 SUPPORTED_Asym_Pause);
a2fbb9ea
ET
7533 break;
7534
c18487ee
YR
7535 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7536 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7537 bp->link_params.ext_phy_config);
7538 break;
7539
a2fbb9ea
ET
7540 default:
7541 BNX2X_ERR("NVRAM config error. "
7542 "BAD XGXS ext_phy_config 0x%x\n",
c18487ee 7543 bp->link_params.ext_phy_config);
a2fbb9ea
ET
7544 return;
7545 }
7546
34f80b04
EG
7547 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7548 port*0x18);
7549 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
a2fbb9ea 7550
a2fbb9ea
ET
7551 break;
7552
7553 default:
7554 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
34f80b04 7555 bp->port.link_config);
a2fbb9ea
ET
7556 return;
7557 }
34f80b04 7558 bp->link_params.phy_addr = bp->port.phy_addr;
a2fbb9ea
ET
7559
7560 /* mask what we support according to speed_cap_mask */
c18487ee
YR
7561 if (!(bp->link_params.speed_cap_mask &
7562 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
34f80b04 7563 bp->port.supported &= ~SUPPORTED_10baseT_Half;
a2fbb9ea 7564
c18487ee
YR
7565 if (!(bp->link_params.speed_cap_mask &
7566 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
34f80b04 7567 bp->port.supported &= ~SUPPORTED_10baseT_Full;
a2fbb9ea 7568
c18487ee
YR
7569 if (!(bp->link_params.speed_cap_mask &
7570 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
34f80b04 7571 bp->port.supported &= ~SUPPORTED_100baseT_Half;
a2fbb9ea 7572
c18487ee
YR
7573 if (!(bp->link_params.speed_cap_mask &
7574 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
34f80b04 7575 bp->port.supported &= ~SUPPORTED_100baseT_Full;
a2fbb9ea 7576
c18487ee
YR
7577 if (!(bp->link_params.speed_cap_mask &
7578 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
34f80b04
EG
7579 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7580 SUPPORTED_1000baseT_Full);
a2fbb9ea 7581
c18487ee
YR
7582 if (!(bp->link_params.speed_cap_mask &
7583 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
34f80b04 7584 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
a2fbb9ea 7585
c18487ee
YR
7586 if (!(bp->link_params.speed_cap_mask &
7587 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
34f80b04 7588 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
a2fbb9ea 7589
34f80b04 7590 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
a2fbb9ea
ET
7591}
7592
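The tail of bnx2x_link_settings_supported() masks the SUPPORTED_* bits against the NVRAM speed_cap_mask, one capability at a time. The same masking with simplified, illustrative bit definitions (not the real SUPPORTED_*/PORT_HW_CFG_* values):

/* sketch: dropping supported modes the NVRAM capability mask forbids */
#include <stdio.h>

#define SUP_10_FULL   (1u << 0)
#define SUP_100_FULL  (1u << 1)
#define SUP_1000_FULL (1u << 2)

#define CAP_10M_FULL  (1u << 0)
#define CAP_100M_FULL (1u << 1)
#define CAP_1G        (1u << 2)

int main(void)
{
	unsigned int supported = SUP_10_FULL | SUP_100_FULL | SUP_1000_FULL;
	unsigned int cap_mask = CAP_100M_FULL | CAP_1G; /* no 10M in NVRAM */

	if (!(cap_mask & CAP_10M_FULL))
		supported &= ~SUP_10_FULL;
	if (!(cap_mask & CAP_100M_FULL))
		supported &= ~SUP_100_FULL;
	if (!(cap_mask & CAP_1G))
		supported &= ~SUP_1000_FULL;

	printf("supported 0x%x\n", supported);	/* 0x6 */
	return 0;
}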
34f80b04 7593static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
a2fbb9ea 7594{
c18487ee 7595 bp->link_params.req_duplex = DUPLEX_FULL;
a2fbb9ea 7596
34f80b04 7597 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
a2fbb9ea 7598 case PORT_FEATURE_LINK_SPEED_AUTO:
34f80b04 7599 if (bp->port.supported & SUPPORTED_Autoneg) {
c18487ee 7600 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7601 bp->port.advertising = bp->port.supported;
a2fbb9ea 7602 } else {
c18487ee
YR
7603 u32 ext_phy_type =
7604 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7605
7606 if ((ext_phy_type ==
7607 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7608 (ext_phy_type ==
7609 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
a2fbb9ea 7610 /* force 10G, no AN */
c18487ee 7611 bp->link_params.req_line_speed = SPEED_10000;
34f80b04 7612 bp->port.advertising =
a2fbb9ea
ET
7613 (ADVERTISED_10000baseT_Full |
7614 ADVERTISED_FIBRE);
7615 break;
7616 }
7617 BNX2X_ERR("NVRAM config error. "
7618 "Invalid link_config 0x%x"
7619 " Autoneg not supported\n",
34f80b04 7620 bp->port.link_config);
a2fbb9ea
ET
7621 return;
7622 }
7623 break;
7624
7625 case PORT_FEATURE_LINK_SPEED_10M_FULL:
34f80b04 7626 if (bp->port.supported & SUPPORTED_10baseT_Full) {
c18487ee 7627 bp->link_params.req_line_speed = SPEED_10;
34f80b04
EG
7628 bp->port.advertising = (ADVERTISED_10baseT_Full |
7629 ADVERTISED_TP);
a2fbb9ea
ET
7630 } else {
7631 BNX2X_ERR("NVRAM config error. "
7632 "Invalid link_config 0x%x"
7633 " speed_cap_mask 0x%x\n",
34f80b04 7634 bp->port.link_config,
c18487ee 7635 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7636 return;
7637 }
7638 break;
7639
7640 case PORT_FEATURE_LINK_SPEED_10M_HALF:
34f80b04 7641 if (bp->port.supported & SUPPORTED_10baseT_Half) {
c18487ee
YR
7642 bp->link_params.req_line_speed = SPEED_10;
7643 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7644 bp->port.advertising = (ADVERTISED_10baseT_Half |
7645 ADVERTISED_TP);
a2fbb9ea
ET
7646 } else {
7647 BNX2X_ERR("NVRAM config error. "
7648 "Invalid link_config 0x%x"
7649 " speed_cap_mask 0x%x\n",
34f80b04 7650 bp->port.link_config,
c18487ee 7651 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7652 return;
7653 }
7654 break;
7655
7656 case PORT_FEATURE_LINK_SPEED_100M_FULL:
34f80b04 7657 if (bp->port.supported & SUPPORTED_100baseT_Full) {
c18487ee 7658 bp->link_params.req_line_speed = SPEED_100;
34f80b04
EG
7659 bp->port.advertising = (ADVERTISED_100baseT_Full |
7660 ADVERTISED_TP);
a2fbb9ea
ET
7661 } else {
7662 BNX2X_ERR("NVRAM config error. "
7663 "Invalid link_config 0x%x"
7664 " speed_cap_mask 0x%x\n",
34f80b04 7665 bp->port.link_config,
c18487ee 7666 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7667 return;
7668 }
7669 break;
7670
7671 case PORT_FEATURE_LINK_SPEED_100M_HALF:
34f80b04 7672 if (bp->port.supported & SUPPORTED_100baseT_Half) {
c18487ee
YR
7673 bp->link_params.req_line_speed = SPEED_100;
7674 bp->link_params.req_duplex = DUPLEX_HALF;
34f80b04
EG
7675 bp->port.advertising = (ADVERTISED_100baseT_Half |
7676 ADVERTISED_TP);
a2fbb9ea
ET
7677 } else {
7678 BNX2X_ERR("NVRAM config error. "
7679 "Invalid link_config 0x%x"
7680 " speed_cap_mask 0x%x\n",
34f80b04 7681 bp->port.link_config,
c18487ee 7682 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7683 return;
7684 }
7685 break;
7686
7687 case PORT_FEATURE_LINK_SPEED_1G:
34f80b04 7688 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
c18487ee 7689 bp->link_params.req_line_speed = SPEED_1000;
34f80b04
EG
7690 bp->port.advertising = (ADVERTISED_1000baseT_Full |
7691 ADVERTISED_TP);
a2fbb9ea
ET
7692 } else {
7693 BNX2X_ERR("NVRAM config error. "
7694 "Invalid link_config 0x%x"
7695 " speed_cap_mask 0x%x\n",
34f80b04 7696 bp->port.link_config,
c18487ee 7697 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7698 return;
7699 }
7700 break;
7701
7702 case PORT_FEATURE_LINK_SPEED_2_5G:
34f80b04 7703 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
c18487ee 7704 bp->link_params.req_line_speed = SPEED_2500;
34f80b04
EG
7705 bp->port.advertising = (ADVERTISED_2500baseX_Full |
7706 ADVERTISED_TP);
a2fbb9ea
ET
7707 } else {
7708 BNX2X_ERR("NVRAM config error. "
7709 "Invalid link_config 0x%x"
7710 " speed_cap_mask 0x%x\n",
34f80b04 7711 bp->port.link_config,
c18487ee 7712 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7713 return;
7714 }
7715 break;
7716
7717 case PORT_FEATURE_LINK_SPEED_10G_CX4:
7718 case PORT_FEATURE_LINK_SPEED_10G_KX4:
7719 case PORT_FEATURE_LINK_SPEED_10G_KR:
34f80b04 7720 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
c18487ee 7721 bp->link_params.req_line_speed = SPEED_10000;
34f80b04
EG
7722 bp->port.advertising = (ADVERTISED_10000baseT_Full |
7723 ADVERTISED_FIBRE);
a2fbb9ea
ET
7724 } else {
7725 BNX2X_ERR("NVRAM config error. "
7726 "Invalid link_config 0x%x"
7727 " speed_cap_mask 0x%x\n",
34f80b04 7728 bp->port.link_config,
c18487ee 7729 bp->link_params.speed_cap_mask);
a2fbb9ea
ET
7730 return;
7731 }
7732 break;
7733
7734 default:
7735 BNX2X_ERR("NVRAM config error. "
7736 "BAD link speed link_config 0x%x\n",
34f80b04 7737 bp->port.link_config);
c18487ee 7738 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
34f80b04 7739 bp->port.advertising = bp->port.supported;
a2fbb9ea
ET
7740 break;
7741 }
a2fbb9ea 7742
34f80b04
EG
7743 bp->link_params.req_flow_ctrl = (bp->port.link_config &
7744 PORT_FEATURE_FLOW_CONTROL_MASK);
c0700f90 7745 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
4ab84d45 7746 !(bp->port.supported & SUPPORTED_Autoneg))
c0700f90 7747 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 7748
c18487ee 7749 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
f1410647 7750 " advertising 0x%x\n",
c18487ee
YR
7751 bp->link_params.req_line_speed,
7752 bp->link_params.req_duplex,
34f80b04 7753 bp->link_params.req_flow_ctrl, bp->port.advertising);
a2fbb9ea
ET
7754}
7755
34f80b04 7756static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
a2fbb9ea 7757{
34f80b04
EG
7758 int port = BP_PORT(bp);
7759 u32 val, val2;
a2fbb9ea 7760
c18487ee 7761 bp->link_params.bp = bp;
34f80b04 7762 bp->link_params.port = port;
c18487ee 7763
c18487ee 7764 bp->link_params.serdes_config =
f1410647 7765 SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
c18487ee 7766 bp->link_params.lane_config =
a2fbb9ea 7767 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
c18487ee 7768 bp->link_params.ext_phy_config =
a2fbb9ea
ET
7769 SHMEM_RD(bp,
7770 dev_info.port_hw_config[port].external_phy_config);
c18487ee 7771 bp->link_params.speed_cap_mask =
a2fbb9ea
ET
7772 SHMEM_RD(bp,
7773 dev_info.port_hw_config[port].speed_capability_mask);
7774
34f80b04 7775 bp->port.link_config =
a2fbb9ea
ET
7776 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
7777
34f80b04
EG
7778 BNX2X_DEV_INFO("serdes_config 0x%08x lane_config 0x%08x\n"
7779 KERN_INFO " ext_phy_config 0x%08x speed_cap_mask 0x%08x"
7780 " link_config 0x%08x\n",
c18487ee
YR
7781 bp->link_params.serdes_config,
7782 bp->link_params.lane_config,
7783 bp->link_params.ext_phy_config,
34f80b04 7784 bp->link_params.speed_cap_mask, bp->port.link_config);
a2fbb9ea 7785
34f80b04 7786 bp->link_params.switch_cfg = (bp->port.link_config &
c18487ee
YR
7787 PORT_FEATURE_CONNECTED_SWITCH_MASK);
7788 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
a2fbb9ea
ET
7789
7790 bnx2x_link_settings_requested(bp);
7791
7792 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
7793 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
7794 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7795 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7796 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7797 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7798 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7799 bp->dev->dev_addr[5] = (u8)(val & 0xff);
c18487ee
YR
7800 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
7801 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
34f80b04
EG
7802}
7803
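bnx2x_get_port_hwinfo() recovers the port MAC address from the two shmem words mac_upper/mac_lower with the byte-shift chain above. The same unpacking standalone, with example register values:

/* sketch: MAC address from the shmem mac_upper/mac_lower words */
#include <stdio.h>

int main(void)
{
	unsigned int upper = 0x0010, lower = 0x18abcdef;
	unsigned char mac[6];

	mac[0] = upper >> 8;  mac[1] = upper;
	mac[2] = lower >> 24; mac[3] = lower >> 16;
	mac[4] = lower >> 8;  mac[5] = lower;

	printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
	       mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
	return 0;
}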
7804static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
7805{
7806 int func = BP_FUNC(bp);
7807 u32 val, val2;
7808 int rc = 0;
a2fbb9ea 7809
34f80b04 7810 bnx2x_get_common_hwinfo(bp);
a2fbb9ea 7811
34f80b04
EG
7812 bp->e1hov = 0;
7813 bp->e1hmf = 0;
7814 if (CHIP_IS_E1H(bp)) {
7815 bp->mf_config =
7816 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
a2fbb9ea 7817
3196a88a
EG
7818 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
7819 FUNC_MF_CFG_E1HOV_TAG_MASK);
34f80b04 7820 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
a2fbb9ea 7821
34f80b04
EG
7822 bp->e1hov = val;
7823 bp->e1hmf = 1;
7824 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
7825 "(0x%04x)\n",
7826 func, bp->e1hov, bp->e1hov);
7827 } else {
7828 BNX2X_DEV_INFO("Single function mode\n");
7829 if (BP_E1HVN(bp)) {
7830 BNX2X_ERR("!!! No valid E1HOV for func %d,"
7831 " aborting\n", func);
7832 rc = -EPERM;
7833 }
7834 }
7835 }
a2fbb9ea 7836
34f80b04
EG
7837 if (!BP_NOMCP(bp)) {
7838 bnx2x_get_port_hwinfo(bp);
7839
7840 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
7841 DRV_MSG_SEQ_NUMBER_MASK);
7842 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7843 }
7844
7845 if (IS_E1HMF(bp)) {
7846 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
7847 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
7848 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
7849 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
7850 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
7851 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
7852 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
7853 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
7854 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
7855 bp->dev->dev_addr[5] = (u8)(val & 0xff);
7856 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
7857 ETH_ALEN);
7858 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
7859 ETH_ALEN);
a2fbb9ea 7860 }
34f80b04
EG
7861
7862 return rc;
a2fbb9ea
ET
7863 }
7864
34f80b04
EG
7865 if (BP_NOMCP(bp)) {
7866 /* only supposed to happen on emulation/FPGA */
33471629 7867 BNX2X_ERR("warning random MAC workaround active\n");
34f80b04
EG
7868 random_ether_addr(bp->dev->dev_addr);
7869 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
7870 }
a2fbb9ea 7871
34f80b04
EG
7872 return rc;
7873}
7874
7875static int __devinit bnx2x_init_bp(struct bnx2x *bp)
7876{
7877 int func = BP_FUNC(bp);
7878 int rc;
7879
da5a662a
VZ
7880 /* Disable interrupt handling until HW is initialized */
7881 atomic_set(&bp->intr_sem, 1);
7882
34f80b04 7883 mutex_init(&bp->port.phy_mutex);
a2fbb9ea 7884
1cf167f2 7885 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
34f80b04
EG
7886 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
7887
7888 rc = bnx2x_get_hwinfo(bp);
7889
7890 /* need to reset chip if undi was active */
7891 if (!BP_NOMCP(bp))
7892 bnx2x_undi_unload(bp);
7893
7894 if (CHIP_REV_IS_FPGA(bp))
7895 printk(KERN_ERR PFX "FPGA detected\n");
7896
7897 if (BP_NOMCP(bp) && (func == 0))
7898 printk(KERN_ERR PFX
7899 "MCP disabled, must load devices in order!\n");
7900
555f6c78 7901 /* Set multi queue mode */
8badd27a
EG
7902 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
7903 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
555f6c78 7904 printk(KERN_ERR PFX
8badd27a 7905 "Multi disabled since int_mode requested is not MSI-X\n");
555f6c78
EG
7906 multi_mode = ETH_RSS_MODE_DISABLED;
7907 }
7908 bp->multi_mode = multi_mode;
7909
7910
7a9b2557
VZ
7911 /* Set TPA flags */
7912 if (disable_tpa) {
7913 bp->flags &= ~TPA_ENABLE_FLAG;
7914 bp->dev->features &= ~NETIF_F_LRO;
7915 } else {
7916 bp->flags |= TPA_ENABLE_FLAG;
7917 bp->dev->features |= NETIF_F_LRO;
7918 }
7919
7920
34f80b04
EG
7921 bp->tx_ring_size = MAX_TX_AVAIL;
7922 bp->rx_ring_size = MAX_RX_AVAIL;
7923
7924 bp->rx_csum = 1;
7925 bp->rx_offset = 0;
7926
7927 bp->tx_ticks = 50;
7928 bp->rx_ticks = 25;
7929
34f80b04
EG
7930 bp->timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
7931 bp->current_interval = (poll ? poll : bp->timer_interval);
7932
7933 init_timer(&bp->timer);
7934 bp->timer.expires = jiffies + bp->current_interval;
7935 bp->timer.data = (unsigned long) bp;
7936 bp->timer.function = bnx2x_timer;
7937
7938 return rc;
a2fbb9ea
ET
7939}
7940
7941/*
7942 * ethtool service functions
7943 */
7944
7945/* All ethtool functions called with rtnl_lock */
7946
7947static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
7948{
7949 struct bnx2x *bp = netdev_priv(dev);
7950
34f80b04
EG
7951 cmd->supported = bp->port.supported;
7952 cmd->advertising = bp->port.advertising;
a2fbb9ea
ET
7953
7954 if (netif_carrier_ok(dev)) {
c18487ee
YR
7955 cmd->speed = bp->link_vars.line_speed;
7956 cmd->duplex = bp->link_vars.duplex;
a2fbb9ea 7957 } else {
c18487ee
YR
7958 cmd->speed = bp->link_params.req_line_speed;
7959 cmd->duplex = bp->link_params.req_duplex;
a2fbb9ea 7960 }
34f80b04
EG
7961 if (IS_E1HMF(bp)) {
7962 u16 vn_max_rate;
7963
7964 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
7965 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
7966 if (vn_max_rate < cmd->speed)
7967 cmd->speed = vn_max_rate;
7968 }
a2fbb9ea 7969
c18487ee
YR
7970 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
7971 u32 ext_phy_type =
7972 XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
f1410647
ET
7973
7974 switch (ext_phy_type) {
7975 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7976 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7977 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7978 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
c18487ee 7979 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
f1410647
ET
7980 cmd->port = PORT_FIBRE;
7981 break;
7982
7983 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7984 cmd->port = PORT_TP;
7985 break;
7986
c18487ee
YR
7987 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7988 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7989 bp->link_params.ext_phy_config);
7990 break;
7991
f1410647
ET
7992 default:
7993 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
c18487ee
YR
7994 bp->link_params.ext_phy_config);
7995 break;
f1410647
ET
7996 }
7997 } else
a2fbb9ea 7998 cmd->port = PORT_TP;
a2fbb9ea 7999
34f80b04 8000 cmd->phy_address = bp->port.phy_addr;
a2fbb9ea
ET
8001 cmd->transceiver = XCVR_INTERNAL;
8002
c18487ee 8003 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
a2fbb9ea 8004 cmd->autoneg = AUTONEG_ENABLE;
f1410647 8005 else
a2fbb9ea 8006 cmd->autoneg = AUTONEG_DISABLE;
a2fbb9ea
ET
8007
8008 cmd->maxtxpkt = 0;
8009 cmd->maxrxpkt = 0;
8010
8011 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8012 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8013 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8014 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8015 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8016 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8017 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8018
8019 return 0;
8020}
8021
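In E1H multi-function mode, bnx2x_get_settings() caps the speed it reports at the per-function maximum bandwidth taken from mf_config. A sketch of that clamp; the mask and shift here are illustrative, not the real FUNC_MF_CFG_* values:

/* sketch: clamping reported link speed to the MF bandwidth limit */
#include <stdio.h>

#define MAX_BW_MASK  0xffff0000u
#define MAX_BW_SHIFT 16

int main(void)
{
	unsigned int mf_config = 25u << MAX_BW_SHIFT; /* 25 * 100 Mb/s */
	unsigned int speed = 10000;	/* link is at 10G */
	unsigned int vn_max_rate =
		((mf_config & MAX_BW_MASK) >> MAX_BW_SHIFT) * 100;

	if (vn_max_rate < speed)
		speed = vn_max_rate;
	printf("reported speed %u Mb/s\n", speed);	/* 2500 */
	return 0;
}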
8022static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8023{
8024 struct bnx2x *bp = netdev_priv(dev);
8025 u32 advertising;
8026
34f80b04
EG
8027 if (IS_E1HMF(bp))
8028 return 0;
8029
a2fbb9ea
ET
8030 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8031 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8032 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8033 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8034 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8035 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8036 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8037
a2fbb9ea 8038 if (cmd->autoneg == AUTONEG_ENABLE) {
34f80b04
EG
8039 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8040 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
a2fbb9ea 8041 return -EINVAL;
f1410647 8042 }
a2fbb9ea
ET
8043
8044 /* advertise the requested speed and duplex if supported */
34f80b04 8045 cmd->advertising &= bp->port.supported;
a2fbb9ea 8046
c18487ee
YR
8047 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8048 bp->link_params.req_duplex = DUPLEX_FULL;
34f80b04
EG
8049 bp->port.advertising |= (ADVERTISED_Autoneg |
8050 cmd->advertising);
a2fbb9ea
ET
8051
8052 } else { /* forced speed */
8053 /* advertise the requested speed and duplex if supported */
8054 switch (cmd->speed) {
8055 case SPEED_10:
8056 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8057 if (!(bp->port.supported &
f1410647
ET
8058 SUPPORTED_10baseT_Full)) {
8059 DP(NETIF_MSG_LINK,
8060 "10M full not supported\n");
a2fbb9ea 8061 return -EINVAL;
f1410647 8062 }
a2fbb9ea
ET
8063
8064 advertising = (ADVERTISED_10baseT_Full |
8065 ADVERTISED_TP);
8066 } else {
34f80b04 8067 if (!(bp->port.supported &
f1410647
ET
8068 SUPPORTED_10baseT_Half)) {
8069 DP(NETIF_MSG_LINK,
8070 "10M half not supported\n");
a2fbb9ea 8071 return -EINVAL;
f1410647 8072 }
a2fbb9ea
ET
8073
8074 advertising = (ADVERTISED_10baseT_Half |
8075 ADVERTISED_TP);
8076 }
8077 break;
8078
8079 case SPEED_100:
8080 if (cmd->duplex == DUPLEX_FULL) {
34f80b04 8081 if (!(bp->port.supported &
f1410647
ET
8082 SUPPORTED_100baseT_Full)) {
8083 DP(NETIF_MSG_LINK,
8084 "100M full not supported\n");
a2fbb9ea 8085 return -EINVAL;
f1410647 8086 }
a2fbb9ea
ET
8087
8088 advertising = (ADVERTISED_100baseT_Full |
8089 ADVERTISED_TP);
8090 } else {
34f80b04 8091 if (!(bp->port.supported &
f1410647
ET
8092 SUPPORTED_100baseT_Half)) {
8093 DP(NETIF_MSG_LINK,
8094 "100M half not supported\n");
a2fbb9ea 8095 return -EINVAL;
f1410647 8096 }
a2fbb9ea
ET
8097
8098 advertising = (ADVERTISED_100baseT_Half |
8099 ADVERTISED_TP);
8100 }
8101 break;
8102
8103 case SPEED_1000:
f1410647
ET
8104 if (cmd->duplex != DUPLEX_FULL) {
8105 DP(NETIF_MSG_LINK, "1G half not supported\n");
a2fbb9ea 8106 return -EINVAL;
f1410647 8107 }
a2fbb9ea 8108
34f80b04 8109 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
f1410647 8110 DP(NETIF_MSG_LINK, "1G full not supported\n");
a2fbb9ea 8111 return -EINVAL;
f1410647 8112 }
a2fbb9ea
ET
8113
8114 advertising = (ADVERTISED_1000baseT_Full |
8115 ADVERTISED_TP);
8116 break;
8117
8118 case SPEED_2500:
f1410647
ET
8119 if (cmd->duplex != DUPLEX_FULL) {
8120 DP(NETIF_MSG_LINK,
8121 "2.5G half not supported\n");
a2fbb9ea 8122 return -EINVAL;
f1410647 8123 }
a2fbb9ea 8124
34f80b04 8125 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
f1410647
ET
8126 DP(NETIF_MSG_LINK,
8127 "2.5G full not supported\n");
a2fbb9ea 8128 return -EINVAL;
f1410647 8129 }
a2fbb9ea 8130
f1410647 8131 advertising = (ADVERTISED_2500baseX_Full |
a2fbb9ea
ET
8132 ADVERTISED_TP);
8133 break;
8134
8135 case SPEED_10000:
f1410647
ET
8136 if (cmd->duplex != DUPLEX_FULL) {
8137 DP(NETIF_MSG_LINK, "10G half not supported\n");
a2fbb9ea 8138 return -EINVAL;
f1410647 8139 }
a2fbb9ea 8140
34f80b04 8141 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
f1410647 8142 DP(NETIF_MSG_LINK, "10G full not supported\n");
a2fbb9ea 8143 return -EINVAL;
f1410647 8144 }
a2fbb9ea
ET
8145
8146 advertising = (ADVERTISED_10000baseT_Full |
8147 ADVERTISED_FIBRE);
8148 break;
8149
8150 default:
f1410647 8151 DP(NETIF_MSG_LINK, "Unsupported speed\n");
a2fbb9ea
ET
8152 return -EINVAL;
8153 }
8154
c18487ee
YR
8155 bp->link_params.req_line_speed = cmd->speed;
8156 bp->link_params.req_duplex = cmd->duplex;
34f80b04 8157 bp->port.advertising = advertising;
a2fbb9ea
ET
8158 }
8159
c18487ee 8160 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
a2fbb9ea 8161 DP_LEVEL " req_duplex %d advertising 0x%x\n",
c18487ee 8162 bp->link_params.req_line_speed, bp->link_params.req_duplex,
34f80b04 8163 bp->port.advertising);
a2fbb9ea 8164
34f80b04 8165 if (netif_running(dev)) {
bb2a0f7a 8166 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8167 bnx2x_link_set(bp);
8168 }
a2fbb9ea
ET
8169
8170 return 0;
8171}
8172
c18487ee
YR
8173#define PHY_FW_VER_LEN 10
8174
a2fbb9ea
ET
8175static void bnx2x_get_drvinfo(struct net_device *dev,
8176 struct ethtool_drvinfo *info)
8177{
8178 struct bnx2x *bp = netdev_priv(dev);
f0e53a84 8179 u8 phy_fw_ver[PHY_FW_VER_LEN];
a2fbb9ea
ET
8180
8181 strcpy(info->driver, DRV_MODULE_NAME);
8182 strcpy(info->version, DRV_MODULE_VERSION);
c18487ee
YR
8183
8184 phy_fw_ver[0] = '\0';
34f80b04 8185 if (bp->port.pmf) {
4a37fb66 8186 bnx2x_acquire_phy_lock(bp);
34f80b04
EG
8187 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8188 (bp->state != BNX2X_STATE_CLOSED),
8189 phy_fw_ver, PHY_FW_VER_LEN);
4a37fb66 8190 bnx2x_release_phy_lock(bp);
34f80b04 8191 }
c18487ee 8192
f0e53a84
EG
8193 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8194 (bp->common.bc_ver & 0xff0000) >> 16,
8195 (bp->common.bc_ver & 0xff00) >> 8,
8196 (bp->common.bc_ver & 0xff),
8197 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
a2fbb9ea
ET
8198 strcpy(info->bus_info, pci_name(bp->pdev));
8199 info->n_stats = BNX2X_NUM_STATS;
8200 info->testinfo_len = BNX2X_NUM_TESTS;
34f80b04 8201 info->eedump_len = bp->common.flash_size;
a2fbb9ea
ET
8202 info->regdump_len = 0;
8203}
8204
8205static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8206{
8207 struct bnx2x *bp = netdev_priv(dev);
8208
8209 if (bp->flags & NO_WOL_FLAG) {
8210 wol->supported = 0;
8211 wol->wolopts = 0;
8212 } else {
8213 wol->supported = WAKE_MAGIC;
8214 if (bp->wol)
8215 wol->wolopts = WAKE_MAGIC;
8216 else
8217 wol->wolopts = 0;
8218 }
8219 memset(&wol->sopass, 0, sizeof(wol->sopass));
8220}
8221
8222static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8223{
8224 struct bnx2x *bp = netdev_priv(dev);
8225
8226 if (wol->wolopts & ~WAKE_MAGIC)
8227 return -EINVAL;
8228
8229 if (wol->wolopts & WAKE_MAGIC) {
8230 if (bp->flags & NO_WOL_FLAG)
8231 return -EINVAL;
8232
8233 bp->wol = 1;
34f80b04 8234 } else
a2fbb9ea 8235 bp->wol = 0;
34f80b04 8236
a2fbb9ea
ET
8237 return 0;
8238}
8239
8240static u32 bnx2x_get_msglevel(struct net_device *dev)
8241{
8242 struct bnx2x *bp = netdev_priv(dev);
8243
8244 return bp->msglevel;
8245}
8246
8247static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8248{
8249 struct bnx2x *bp = netdev_priv(dev);
8250
8251 if (capable(CAP_NET_ADMIN))
8252 bp->msglevel = level;
8253}
8254
8255static int bnx2x_nway_reset(struct net_device *dev)
8256{
8257 struct bnx2x *bp = netdev_priv(dev);
8258
34f80b04
EG
8259 if (!bp->port.pmf)
8260 return 0;
a2fbb9ea 8261
34f80b04 8262 if (netif_running(dev)) {
bb2a0f7a 8263 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
34f80b04
EG
8264 bnx2x_link_set(bp);
8265 }
a2fbb9ea
ET
8266
8267 return 0;
8268}
8269
8270static int bnx2x_get_eeprom_len(struct net_device *dev)
8271{
8272 struct bnx2x *bp = netdev_priv(dev);
8273
34f80b04 8274 return bp->common.flash_size;
a2fbb9ea
ET
8275}
8276
8277static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8278{
34f80b04 8279 int port = BP_PORT(bp);
a2fbb9ea
ET
8280 int count, i;
8281 u32 val = 0;
8282
8283 /* adjust timeout for emulation/FPGA */
8284 count = NVRAM_TIMEOUT_COUNT;
8285 if (CHIP_REV_IS_SLOW(bp))
8286 count *= 100;
8287
8288 /* request access to nvram interface */
8289 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8290 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8291
8292 for (i = 0; i < count*10; i++) {
8293 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8294 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8295 break;
8296
8297 udelay(5);
8298 }
8299
8300 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8301 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
a2fbb9ea
ET
8302 return -EBUSY;
8303 }
8304
8305 return 0;
8306}
8307
8308static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8309{
34f80b04 8310 int port = BP_PORT(bp);
a2fbb9ea
ET
8311 int count, i;
8312 u32 val = 0;
8313
8314 /* adjust timeout for emulation/FPGA */
8315 count = NVRAM_TIMEOUT_COUNT;
8316 if (CHIP_REV_IS_SLOW(bp))
8317 count *= 100;
8318
8319 /* relinquish nvram interface */
8320 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8321 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8322
8323 for (i = 0; i < count*10; i++) {
8324 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8325 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8326 break;
8327
8328 udelay(5);
8329 }
8330
8331 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8332 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
a2fbb9ea
ET
8333 return -EBUSY;
8334 }
8335
8336 return 0;
8337}
8338
8339static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8340{
8341 u32 val;
8342
8343 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8344
8345 /* enable both bits, even on read */
8346 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8347 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8348 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8349}
8350
8351static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8352{
8353 u32 val;
8354
8355 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8356
8357 /* disable both bits, even after read */
8358 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8359 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8360 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8361}
8362
8363static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8364 u32 cmd_flags)
8365{
f1410647 8366 int count, i, rc;
a2fbb9ea
ET
8367 u32 val;
8368
8369 /* build the command word */
8370 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8371
8372 /* need to clear DONE bit separately */
8373 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8374
8375 /* address of the NVRAM to read from */
8376 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8377 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8378
8379 /* issue a read command */
8380 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8381
8382 /* adjust timeout for emulation/FPGA */
8383 count = NVRAM_TIMEOUT_COUNT;
8384 if (CHIP_REV_IS_SLOW(bp))
8385 count *= 100;
8386
8387 /* wait for completion */
8388 *ret_val = 0;
8389 rc = -EBUSY;
8390 for (i = 0; i < count; i++) {
8391 udelay(5);
8392 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8393
8394 if (val & MCPR_NVM_COMMAND_DONE) {
8395 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
a2fbb9ea
ET
8396 /* we read nvram data in cpu order
8397 * but ethtool sees it as an array of bytes;
8398 * converting to big-endian does the job */
8399 val = cpu_to_be32(val);
8400 *ret_val = val;
8401 rc = 0;
8402 break;
8403 }
8404 }
8405
8406 return rc;
8407}

static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
			    int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the first word(s) */
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((buf_size > sizeof(u32)) && (rc == 0)) {
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);

		/* advance to the next dword */
		offset += sizeof(u32);
		ret_buf += sizeof(u32);
		buf_size -= sizeof(u32);
		cmd_flags = 0;
	}

	if (rc == 0) {
		cmd_flags |= MCPR_NVM_COMMAND_LAST;
		rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
		memcpy(ret_buf, &val, 4);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
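
/* Illustrative usage sketch (not part of the driver, hence compiled out):
 * reading and validating the NVRAM magic word through the helper above.
 * The 0x669955aa value is the one bnx2x_test_nvram() below checks for;
 * the helper name bnx2x_nvram_check_magic() is made up for this example.
 */
#if 0
static int bnx2x_nvram_check_magic(struct bnx2x *bp)
{
	__be32 magic;
	int rc;

	/* bnx2x_nvram_read() returns the data already in big-endian order */
	rc = bnx2x_nvram_read(bp, 0, (u8 *)&magic, sizeof(magic));
	if (rc)
		return rc;

	return (be32_to_cpu(magic) == 0x669955aa) ? 0 : -ENODEV;
}
#endif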

static int bnx2x_get_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_get_eeprom */

	rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
				   u32 cmd_flags)
{
	int count, i, rc;

	/* build the command word */
	cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;

	/* need to clear DONE bit separately */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);

	/* write the data */
	REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);

	/* address of the NVRAM to write to */
	REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
	       (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));

	/* issue the write command */
	REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);

	/* adjust timeout for emulation/FPGA */
	count = NVRAM_TIMEOUT_COUNT;
	if (CHIP_REV_IS_SLOW(bp))
		count *= 100;

	/* wait for completion */
	rc = -EBUSY;
	for (i = 0; i < count; i++) {
		udelay(5);
		val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
		if (val & MCPR_NVM_COMMAND_DONE) {
			rc = 0;
			break;
		}
	}

	return rc;
}

#define BYTE_OFFSET(offset)		(8 * (offset & 0x03))
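/* BYTE_OFFSET() gives the bit position of a byte within its naturally
 * aligned dword: offset 0 -> 0, 1 -> 8, 2 -> 16, 3 -> 24. It is used
 * below to splice a single byte into the enclosing dword. */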

static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
			      int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 align_offset;
	u32 val;

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	/* read the enclosing dword, modify one byte, write it back */
	cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
	align_offset = (offset & ~0x03);
	rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);

	if (rc == 0) {
		val &= ~(0xff << BYTE_OFFSET(offset));
		val |= (*data_buf << BYTE_OFFSET(offset));

		/* nvram data is returned as an array of bytes
		 * convert it back to cpu order */
		val = be32_to_cpu(val);

		rc = bnx2x_nvram_write_dword(bp, align_offset, val,
					     cmd_flags);
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}

static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
			     int buf_size)
{
	int rc;
	u32 cmd_flags;
	u32 val;
	u32 written_so_far;

	if (buf_size == 1)	/* ethtool */
		return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);

	if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
		DP(BNX2X_MSG_NVM,
		   "Invalid parameter: offset 0x%x  buf_size 0x%x\n",
		   offset, buf_size);
		return -EINVAL;
	}

	if (offset + buf_size > bp->common.flash_size) {
		DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
				  " buf_size (0x%x) > flash_size (0x%x)\n",
		   offset, buf_size, bp->common.flash_size);
		return -EINVAL;
	}

	/* request access to nvram interface */
	rc = bnx2x_acquire_nvram_lock(bp);
	if (rc)
		return rc;

	/* enable access to nvram interface */
	bnx2x_enable_nvram_access(bp);

	written_so_far = 0;
	cmd_flags = MCPR_NVM_COMMAND_FIRST;
	while ((written_so_far < buf_size) && (rc == 0)) {
		if (written_so_far == (buf_size - sizeof(u32)))
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_LAST;
		else if ((offset % NVRAM_PAGE_SIZE) == 0)
			cmd_flags |= MCPR_NVM_COMMAND_FIRST;

		memcpy(&val, data_buf, 4);

		rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);

		/* advance to the next dword */
		offset += sizeof(u32);
		data_buf += sizeof(u32);
		written_so_far += sizeof(u32);
		cmd_flags = 0;
	}

	/* disable access to nvram interface */
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}
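
/* Note on the flag juggling above: the flash is programmed in pages, and
 * every burst must open with MCPR_NVM_COMMAND_FIRST and close with
 * MCPR_NVM_COMMAND_LAST. Assuming a 256-byte page (the exact value of
 * NVRAM_PAGE_SIZE depends on the flash part), a 512-byte write at offset 0
 * is issued as two bursts of 64 dwords, with FIRST raised at offsets 0 and
 * 256 and LAST raised at offsets 252 and 508. */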

static int bnx2x_set_eeprom(struct net_device *dev,
			    struct ethtool_eeprom *eeprom, u8 *eebuf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc;

	if (!netif_running(dev))
		return -EAGAIN;

	DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
	   DP_LEVEL "  magic 0x%x  offset 0x%x (%d)  len 0x%x (%d)\n",
	   eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
	   eeprom->len, eeprom->len);

	/* parameters already validated in ethtool_set_eeprom */

	/* If the magic number is PHY (0x00504859 - ASCII "PHY"),
	   upgrade the PHY FW */
	if (eeprom->magic == 0x00504859)
		if (bp->port.pmf) {

			bnx2x_acquire_phy_lock(bp);
			rc = bnx2x_flash_download(bp, BP_PORT(bp),
					     bp->link_params.ext_phy_config,
					     (bp->state != BNX2X_STATE_CLOSED),
					     eebuf, eeprom->len);
			if ((bp->state == BNX2X_STATE_OPEN) ||
			    (bp->state == BNX2X_STATE_DISABLED)) {
				rc |= bnx2x_link_reset(&bp->link_params,
						       &bp->link_vars);
				rc |= bnx2x_phy_init(&bp->link_params,
						     &bp->link_vars);
			}
			bnx2x_release_phy_lock(bp);

		} else /* Only the PMF can access the PHY */
			return -EINVAL;
	else
		rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);

	return rc;
}

static int bnx2x_get_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(coal, 0, sizeof(struct ethtool_coalesce));

	coal->rx_coalesce_usecs = bp->rx_ticks;
	coal->tx_coalesce_usecs = bp->tx_ticks;

	return 0;
}

static int bnx2x_set_coalesce(struct net_device *dev,
			      struct ethtool_coalesce *coal)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* clamp the requested coalescing timeouts */
	bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
	if (bp->rx_ticks > 3000)
		bp->rx_ticks = 3000;

	bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
	if (bp->tx_ticks > 0x3000)
		bp->tx_ticks = 0x3000;

	if (netif_running(dev))
		bnx2x_update_coalesce(bp);

	return 0;
}

static void bnx2x_get_ringparam(struct net_device *dev,
				struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);

	ering->rx_max_pending = MAX_RX_AVAIL;
	ering->rx_mini_max_pending = 0;
	ering->rx_jumbo_max_pending = 0;

	ering->rx_pending = bp->rx_ring_size;
	ering->rx_mini_pending = 0;
	ering->rx_jumbo_pending = 0;

	ering->tx_max_pending = MAX_TX_AVAIL;
	ering->tx_pending = bp->tx_ring_size;
}

static int bnx2x_set_ringparam(struct net_device *dev,
			       struct ethtool_ringparam *ering)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((ering->rx_pending > MAX_RX_AVAIL) ||
	    (ering->tx_pending > MAX_TX_AVAIL) ||
	    (ering->tx_pending <= MAX_SKB_FRAGS + 4))
		return -EINVAL;

	bp->rx_ring_size = ering->rx_pending;
	bp->tx_ring_size = ering->tx_pending;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_get_pauseparam(struct net_device *dev,
				 struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	epause->autoneg = (bp->link_params.req_flow_ctrl ==
			   BNX2X_FLOW_CTRL_AUTO) &&
			  (bp->link_params.req_line_speed == SPEED_AUTO_NEG);

	epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
			    BNX2X_FLOW_CTRL_RX);
	epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
			    BNX2X_FLOW_CTRL_TX);

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
}

static int bnx2x_set_pauseparam(struct net_device *dev,
				struct ethtool_pauseparam *epause)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
	   DP_LEVEL "  autoneg %d  rx_pause %d  tx_pause %d\n",
	   epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);

	bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;

	if (epause->rx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;

	if (epause->tx_pause)
		bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;

	if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	if (epause->autoneg) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "autoneg not supported\n");
			return -EINVAL;
		}

		if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
			bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
	}

	DP(NETIF_MSG_LINK,
	   "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

static int bnx2x_set_flags(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int changed = 0;
	int rc = 0;

	/* TPA requires Rx CSUM offloading */
	if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
		if (!(dev->features & NETIF_F_LRO)) {
			dev->features |= NETIF_F_LRO;
			bp->flags |= TPA_ENABLE_FLAG;
			changed = 1;
		}

	} else if (dev->features & NETIF_F_LRO) {
		dev->features &= ~NETIF_F_LRO;
		bp->flags &= ~TPA_ENABLE_FLAG;
		changed = 1;
	}

	if (changed && netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static u32 bnx2x_get_rx_csum(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->rx_csum;
}

static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	bp->rx_csum = data;

	/* Disable TPA when Rx CSUM is disabled, otherwise all
	   TPA'ed packets will be discarded due to a wrong TCP CSUM */
	if (!data) {
		u32 flags = ethtool_op_get_flags(dev);

		rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
	}

	return rc;
}

static int bnx2x_set_tso(struct net_device *dev, u32 data)
{
	if (data) {
		dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features |= NETIF_F_TSO6;
	} else {
		dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
		dev->features &= ~NETIF_F_TSO6;
	}

	return 0;
}

static const struct {
	char string[ETH_GSTRING_LEN];
} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
	{ "idle check (online)" }
};

static int bnx2x_self_test_count(struct net_device *dev)
{
	return BNX2X_NUM_TESTS;
}

static int bnx2x_test_registers(struct bnx2x *bp)
{
	int idx, i, rc = -ENODEV;
	u32 wr_val = 0;
	int port = BP_PORT(bp);
	static const struct {
		u32 offset0;
		u32 offset1;
		u32 mask;
	} reg_tbl[] = {
/* 0 */		{ BRB1_REG_PAUSE_LOW_THRESHOLD_0,      4, 0x000003ff },
		{ DORQ_REG_DB_ADDR0,                   4, 0xffffffff },
		{ HC_REG_AGG_INT_0,                    4, 0x000003ff },
		{ PBF_REG_MAC_IF0_ENABLE,              4, 0x00000001 },
		{ PBF_REG_P0_INIT_CRD,                 4, 0x000007ff },
		{ PRS_REG_CID_PORT_0,                  4, 0x00ffffff },
		{ PXP2_REG_PSWRQ_CDU0_L2P,             4, 0x000fffff },
		{ PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR,    8, 0x0003ffff },
		{ PXP2_REG_PSWRQ_TM0_L2P,              4, 0x000fffff },
		{ PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR,   8, 0x0003ffff },
/* 10 */	{ PXP2_REG_PSWRQ_TSDM0_L2P,            4, 0x000fffff },
		{ QM_REG_CONNNUM_0,                    4, 0x000fffff },
		{ TM_REG_LIN0_MAX_ACTIVE_CID,          4, 0x0003ffff },
		{ SRC_REG_KEYRSS0_0,                  40, 0xffffffff },
		{ SRC_REG_KEYRSS0_7,                  40, 0xffffffff },
		{ XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
		{ XCM_REG_WU_DA_CNT_CMD00,             4, 0x00000003 },
		{ XCM_REG_GLB_DEL_ACK_MAX_CNT_0,       4, 0x000000ff },
		{ NIG_REG_EGRESS_MNG0_FIFO,           20, 0xffffffff },
		{ NIG_REG_LLH0_T_BIT,                  4, 0x00000001 },
/* 20 */	{ NIG_REG_EMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_BMAC0_IN_EN,                 4, 0x00000001 },
		{ NIG_REG_XCM0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_BRB0_OUT_EN,                 4, 0x00000001 },
		{ NIG_REG_LLH0_XCM_MASK,               4, 0x00000007 },
		{ NIG_REG_LLH0_ACPI_PAT_6_LEN,        68, 0x000000ff },
		{ NIG_REG_LLH0_ACPI_PAT_0_CRC,        68, 0xffffffff },
		{ NIG_REG_LLH0_DEST_MAC_0_0,         160, 0xffffffff },
		{ NIG_REG_LLH0_DEST_IP_0_1,          160, 0xffffffff },
		{ NIG_REG_LLH0_IPV4_IPV6_0,          160, 0x00000001 },
/* 30 */	{ NIG_REG_LLH0_DEST_UDP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_DEST_TCP_0,           160, 0x0000ffff },
		{ NIG_REG_LLH0_VLAN_ID_0,            160, 0x00000fff },
		{ NIG_REG_XGXS_SERDES0_MODE_SEL,       4, 0x00000001 },
		{ NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
		{ NIG_REG_STATUS_INTERRUPT_PORT0,      4, 0x07ffffff },
		{ NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
		{ NIG_REG_SERDES0_CTRL_PHY_ADDR,      16, 0x0000001f },

		{ 0xffffffff, 0, 0x00000000 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Repeat the test twice:
	   First by writing 0x00000000, second by writing 0xffffffff */
	for (idx = 0; idx < 2; idx++) {

		switch (idx) {
		case 0:
			wr_val = 0;
			break;
		case 1:
			wr_val = 0xffffffff;
			break;
		}

		for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
			u32 offset, mask, save_val, val;

			offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
			mask = reg_tbl[i].mask;

			save_val = REG_RD(bp, offset);

			REG_WR(bp, offset, wr_val);
			val = REG_RD(bp, offset);

			/* Restore the original register's value */
			REG_WR(bp, offset, save_val);

			/* verify that the value read back is as expected */
			if ((val & mask) != (wr_val & mask))
				goto test_reg_exit;
		}
	}

	rc = 0;

test_reg_exit:
	return rc;
}

static int bnx2x_test_memory(struct bnx2x *bp)
{
	int i, j, rc = -ENODEV;
	u32 val;
	static const struct {
		u32 offset;
		int size;
	} mem_tbl[] = {
		{ CCM_REG_XX_DESCR_TABLE,   CCM_REG_XX_DESCR_TABLE_SIZE },
		{ CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
		{ CFC_REG_LINK_LIST,        CFC_REG_LINK_LIST_SIZE },
		{ DMAE_REG_CMD_MEM,         DMAE_REG_CMD_MEM_SIZE },
		{ TCM_REG_XX_DESCR_TABLE,   TCM_REG_XX_DESCR_TABLE_SIZE },
		{ UCM_REG_XX_DESCR_TABLE,   UCM_REG_XX_DESCR_TABLE_SIZE },
		{ XCM_REG_XX_DESCR_TABLE,   XCM_REG_XX_DESCR_TABLE_SIZE },

		{ 0xffffffff, 0 }
	};
	static const struct {
		char *name;
		u32 offset;
		u32 e1_mask;
		u32 e1h_mask;
	} prty_tbl[] = {
		{ "CCM_PRTY_STS",  CCM_REG_CCM_PRTY_STS,   0x3ffc0, 0 },
		{ "CFC_PRTY_STS",  CFC_REG_CFC_PRTY_STS,   0x2,     0x2 },
		{ "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0,       0 },
		{ "TCM_PRTY_STS",  TCM_REG_TCM_PRTY_STS,   0x3ffc0, 0 },
		{ "UCM_PRTY_STS",  UCM_REG_UCM_PRTY_STS,   0x3ffc0, 0 },
		{ "XCM_PRTY_STS",  XCM_REG_XCM_PRTY_STS,   0x3ffc1, 0 },

		{ NULL, 0xffffffff, 0, 0 }
	};

	if (!netif_running(bp->dev))
		return rc;

	/* Go through all the memories */
	for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
		for (j = 0; j < mem_tbl[i].size; j++)
			REG_RD(bp, mem_tbl[i].offset + j*4);

	/* Check the parity status */
	for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
		val = REG_RD(bp, prty_tbl[i].offset);
		if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
		    (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
			DP(NETIF_MSG_HW,
			   "%s is 0x%x\n", prty_tbl[i].name, val);
			goto test_mem_exit;
		}
	}

	rc = 0;

test_mem_exit:
	return rc;
}

static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
{
	int cnt = 1000;

	if (link_up)
		while (bnx2x_link_test(bp) && cnt--)
			msleep(10);
}

static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb;
	unsigned char *packet;
	struct bnx2x_fastpath *fp = &bp->fp[0];
	u16 tx_start_idx, tx_idx;
	u16 rx_start_idx, rx_idx;
	u16 pkt_prod;
	struct sw_tx_bd *tx_buf;
	struct eth_tx_bd *tx_bd;
	dma_addr_t mapping;
	union eth_rx_cqe *cqe;
	u8 cqe_fp_flags;
	struct sw_rx_bd *rx_buf;
	u16 len;
	int rc = -ENODEV;

	if (loopback_mode == BNX2X_MAC_LOOPBACK) {
		bp->link_params.loopback_mode = LOOPBACK_BMAC;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);

	} else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
		u16 cnt = 1000;
		bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		/* wait until link state is restored */
		if (link_up)
			while (cnt-- && bnx2x_test_link(&bp->link_params,
							&bp->link_vars))
				msleep(10);
	} else
		return -EINVAL;

	pkt_size = 1514;
	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (!skb) {
		rc = -ENOMEM;
		goto test_loopback_exit;
	}
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
	memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
	for (i = ETH_HLEN; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	num_pkts = 0;
	tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
	rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);

	pkt_prod = fp->tx_pkt_prod++;
	tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
	tx_buf->first_bd = fp->tx_bd_prod;
	tx_buf->skb = skb;

	tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
	mapping = pci_map_single(bp->pdev, skb->data,
				 skb_headlen(skb), PCI_DMA_TODEVICE);
	tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	tx_bd->nbd = cpu_to_le16(1);
	tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
	tx_bd->vlan = cpu_to_le16(pkt_prod);
	tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
				       ETH_TX_BD_FLAGS_END_BD);
	tx_bd->general_data = ((UNICAST_ADDRESS <<
				ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);

	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	num_pkts++;
	fp->tx_bd_prod++;
	bp->dev->trans_start = jiffies;

	udelay(100);

	tx_idx = le16_to_cpu(*fp->tx_cons_sb);
	if (tx_idx != tx_start_idx + num_pkts)
		goto test_loopback_exit;

	rx_idx = le16_to_cpu(*fp->rx_cons_sb);
	if (rx_idx != rx_start_idx + num_pkts)
		goto test_loopback_exit;

	cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
	cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
	if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
		goto test_loopback_rx_exit;

	len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
	if (len != pkt_size)
		goto test_loopback_rx_exit;

	rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
	skb = rx_buf->skb;
	skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
	for (i = ETH_HLEN; i < pkt_size; i++)
		if (*(skb->data + i) != (unsigned char) (i & 0xff))
			goto test_loopback_rx_exit;

	rc = 0;

test_loopback_rx_exit:

	fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
	fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
	fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
	fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
			     fp->rx_sge_prod);

test_loopback_exit:
	bp->link_params.loopback_mode = LOOPBACK_NONE;

	return rc;
}

static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
{
	int rc = 0;

	if (!netif_running(bp->dev))
		return BNX2X_LOOPBACK_FAILED;

	bnx2x_netif_stop(bp, 1);
	bnx2x_acquire_phy_lock(bp);

	if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
		rc |= BNX2X_MAC_LOOPBACK_FAILED;
	}

	if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
		DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
		rc |= BNX2X_PHY_LOOPBACK_FAILED;
	}

	bnx2x_release_phy_lock(bp);
	bnx2x_netif_start(bp);

	return rc;
}

#define CRC32_RESIDUAL			0xdebb20e3

static int bnx2x_test_nvram(struct bnx2x *bp)
{
	static const struct {
		int offset;
		int size;
	} nvram_tbl[] = {
		{     0,  0x14 }, /* bootstrap */
		{  0x14,  0xec }, /* dir */
		{ 0x100, 0x350 }, /* manuf_info */
		{ 0x450,  0xf0 }, /* feature_info */
		{ 0x640,  0x64 }, /* upgrade_key_info */
		{ 0x6a4,  0x64 },
		{ 0x708,  0x70 }, /* manuf_key_info */
		{ 0x778,  0x70 },
		{     0,     0 }
	};
	u32 buf[0x350 / 4];
	u8 *data = (u8 *)buf;
	int i, rc;
	u32 magic, csum;

	rc = bnx2x_nvram_read(bp, 0, data, 4);
	if (rc) {
		DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
		goto test_nvram_exit;
	}

	magic = be32_to_cpu(buf[0]);
	if (magic != 0x669955aa) {
		DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
		rc = -ENODEV;
		goto test_nvram_exit;
	}

	for (i = 0; nvram_tbl[i].size; i++) {

		rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
				      nvram_tbl[i].size);
		if (rc) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
			goto test_nvram_exit;
		}

		csum = ether_crc_le(nvram_tbl[i].size, data);
		if (csum != CRC32_RESIDUAL) {
			DP(NETIF_MSG_PROBE,
			   "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
			rc = -ENODEV;
			goto test_nvram_exit;
		}
	}

test_nvram_exit:
	return rc;
}
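
/* Why a fixed residual works: each NVRAM area ends with its own CRC32, so
 * running ether_crc_le() over data-plus-CRC yields the well-known CRC-32
 * residue 0xdebb20e3 whenever the area is intact - no per-area expected
 * value needs to be stored. */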

static int bnx2x_test_intr(struct bnx2x *bp)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int i, rc;

	if (!netif_running(bp->dev))
		return -ENODEV;

	config->hdr.length = 0;
	if (CHIP_IS_E1(bp))
		config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
	else
		config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = BP_CL_ID(bp);
	config->hdr.reserved1 = 0;

	rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			   U64_HI(bnx2x_sp_mapping(bp, mac_config)),
			   U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
	if (rc == 0) {
		bp->set_mac_pending++;
		for (i = 0; i < 10; i++) {
			if (!bp->set_mac_pending)
				break;
			msleep_interruptible(10);
		}
		if (i == 10)
			rc = -ENODEV;
	}

	return rc;
}
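
/* The interrupt test above is indirect: it posts an (empty) SET_MAC ramrod
 * on the slowpath queue and waits for set_mac_pending to be cleared, which
 * only happens from the completion path - so a cleared flag within the
 * ~100ms window proves slowpath interrupts are being delivered. */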

static void bnx2x_self_test(struct net_device *dev,
			    struct ethtool_test *etest, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);

	memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);

	if (!netif_running(dev))
		return;

	/* offline tests are not supported in MF mode */
	if (IS_E1HMF(bp))
		etest->flags &= ~ETH_TEST_FL_OFFLINE;

	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		u8 link_up;

		link_up = bp->link_vars.link_up;
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_DIAG);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);

		if (bnx2x_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2x_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		buf[2] = bnx2x_test_loopback(bp, link_up);
		if (buf[2] != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		bnx2x_nic_load(bp, LOAD_NORMAL);
		/* wait until link state is restored */
		bnx2x_wait_for_link(bp, link_up);
	}
	if (bnx2x_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2x_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bp->port.pmf)
		if (bnx2x_link_test(bp) != 0) {
			buf[5] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}

#ifdef BNX2X_EXTRA_DEBUG
	bnx2x_panic_dump(bp);
#endif
}

static const struct {
	long offset;
	int size;
	u8 string[ETH_GSTRING_LEN];
} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
/* 1 */	{ Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
	{ Q_STATS_OFFSET32(error_bytes_received_hi),
						8, "[%d]: rx_error_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_received_hi),
						8, "[%d]: rx_ucast_packets" },
	{ Q_STATS_OFFSET32(total_multicast_packets_received_hi),
						8, "[%d]: rx_mcast_packets" },
	{ Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
						8, "[%d]: rx_bcast_packets" },
	{ Q_STATS_OFFSET32(no_buff_discard_hi),	8, "[%d]: rx_discards" },
	{ Q_STATS_OFFSET32(rx_err_discard_pkt),
					4, "[%d]: rx_phy_ip_err_discards"},
	{ Q_STATS_OFFSET32(rx_skb_alloc_failed),
					4, "[%d]: rx_skb_alloc_discard" },
	{ Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },

/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi),	8, "[%d]: tx_bytes" },
	{ Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
						8, "[%d]: tx_packets" }
};

static const struct {
	long offset;
	int size;
	u32 flags;
#define STATS_FLAGS_PORT		1
#define STATS_FLAGS_FUNC		2
#define STATS_FLAGS_BOTH		(STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
	u8 string[ETH_GSTRING_LEN];
} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
/* 1 */	{ STATS_OFFSET32(total_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bytes" },
	{ STATS_OFFSET32(error_bytes_received_hi),
				8, STATS_FLAGS_BOTH, "rx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
	{ STATS_OFFSET32(total_multicast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
	{ STATS_OFFSET32(total_broadcast_packets_received_hi),
				8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
	{ STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
				8, STATS_FLAGS_PORT, "rx_crc_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
				8, STATS_FLAGS_PORT, "rx_align_errors" },
	{ STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_undersize_packets" },
	{ STATS_OFFSET32(etherstatsoverrsizepkts_hi),
				8, STATS_FLAGS_PORT, "rx_oversize_packets" },
/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
				8, STATS_FLAGS_PORT, "rx_fragments" },
	{ STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
				8, STATS_FLAGS_PORT, "rx_jabbers" },
	{ STATS_OFFSET32(no_buff_discard_hi),
				8, STATS_FLAGS_BOTH, "rx_discards" },
	{ STATS_OFFSET32(mac_filter_discard),
				4, STATS_FLAGS_PORT, "rx_filtered_packets" },
	{ STATS_OFFSET32(xxoverflow_discard),
				4, STATS_FLAGS_PORT, "rx_fw_discards" },
	{ STATS_OFFSET32(brb_drop_hi),
				8, STATS_FLAGS_PORT, "rx_brb_discard" },
	{ STATS_OFFSET32(brb_truncate_hi),
				8, STATS_FLAGS_PORT, "rx_brb_truncate" },
	{ STATS_OFFSET32(pause_frames_received_hi),
				8, STATS_FLAGS_PORT, "rx_pause_frames" },
	{ STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
				8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
	{ STATS_OFFSET32(nig_timer_max),
			4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
				4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
	{ STATS_OFFSET32(rx_skb_alloc_failed),
				4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
	{ STATS_OFFSET32(hw_csum_err),
				4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },

	{ STATS_OFFSET32(total_bytes_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_bytes" },
	{ STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
				8, STATS_FLAGS_PORT, "tx_error_bytes" },
	{ STATS_OFFSET32(total_unicast_packets_transmitted_hi),
				8, STATS_FLAGS_BOTH, "tx_packets" },
	{ STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
				8, STATS_FLAGS_PORT, "tx_mac_errors" },
	{ STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
				8, STATS_FLAGS_PORT, "tx_carrier_errors" },
	{ STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_single_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
				8, STATS_FLAGS_PORT, "tx_multi_collisions" },
/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
				8, STATS_FLAGS_PORT, "tx_deferred" },
	{ STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_excess_collisions" },
	{ STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
				8, STATS_FLAGS_PORT, "tx_late_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
				8, STATS_FLAGS_PORT, "tx_total_collisions" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
				8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
			8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
			8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
			8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
	{ STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
			8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
	{ STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
			8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
	{ STATS_OFFSET32(pause_frames_sent_hi),
				8, STATS_FLAGS_PORT, "tx_pause_frames" }
};

#define IS_PORT_STAT(i) \
	((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
#define IS_FUNC_STAT(i)		(bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
#define IS_E1HMF_MODE_STAT(bp) \
			(IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))

static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, j, k;

	switch (stringset) {
	case ETH_SS_STATS:
		if (is_multi(bp)) {
			k = 0;
			for_each_queue(bp, i) {
				for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
					sprintf(buf + (k + j)*ETH_GSTRING_LEN,
						bnx2x_q_stats_arr[j].string, i);
				k += BNX2X_NUM_Q_STATS;
			}
			if (IS_E1HMF_MODE_STAT(bp))
				break;
			for (j = 0; j < BNX2X_NUM_STATS; j++)
				strcpy(buf + (k + j)*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[j].string);
		} else {
			for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
				if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
					continue;
				strcpy(buf + j*ETH_GSTRING_LEN,
				       bnx2x_stats_arr[i].string);
				j++;
			}
		}
		break;

	case ETH_SS_TEST:
		memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
		break;
	}
}

static int bnx2x_get_stats_count(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	int i, num_stats;

	if (is_multi(bp)) {
		num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
		if (!IS_E1HMF_MODE_STAT(bp))
			num_stats += BNX2X_NUM_STATS;
	} else {
		if (IS_E1HMF_MODE_STAT(bp)) {
			num_stats = 0;
			for (i = 0; i < BNX2X_NUM_STATS; i++)
				if (IS_FUNC_STAT(i))
					num_stats++;
		} else
			num_stats = BNX2X_NUM_STATS;
	}

	return num_stats;
}

static void bnx2x_get_ethtool_stats(struct net_device *dev,
				    struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 *hw_stats, *offset;
	int i, j, k;

	if (is_multi(bp)) {
		k = 0;
		for_each_queue(bp, i) {
			hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
			for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
				if (bnx2x_q_stats_arr[j].size == 0) {
					/* skip this counter */
					buf[k + j] = 0;
					continue;
				}
				offset = (hw_stats +
					  bnx2x_q_stats_arr[j].offset);
				if (bnx2x_q_stats_arr[j].size == 4) {
					/* 4-byte counter */
					buf[k + j] = (u64) *offset;
					continue;
				}
				/* 8-byte counter */
				buf[k + j] = HILO_U64(*offset, *(offset + 1));
			}
			k += BNX2X_NUM_Q_STATS;
		}
		if (IS_E1HMF_MODE_STAT(bp))
			return;
		hw_stats = (u32 *)&bp->eth_stats;
		for (j = 0; j < BNX2X_NUM_STATS; j++) {
			if (bnx2x_stats_arr[j].size == 0) {
				/* skip this counter */
				buf[k + j] = 0;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[j].offset);
			if (bnx2x_stats_arr[j].size == 4) {
				/* 4-byte counter */
				buf[k + j] = (u64) *offset;
				continue;
			}
			/* 8-byte counter */
			buf[k + j] = HILO_U64(*offset, *(offset + 1));
		}
	} else {
		hw_stats = (u32 *)&bp->eth_stats;
		for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
			if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
				continue;
			if (bnx2x_stats_arr[i].size == 0) {
				/* skip this counter */
				buf[j] = 0;
				j++;
				continue;
			}
			offset = (hw_stats + bnx2x_stats_arr[i].offset);
			if (bnx2x_stats_arr[i].size == 4) {
				/* 4-byte counter */
				buf[j] = (u64) *offset;
				j++;
				continue;
			}
			/* 8-byte counter */
			buf[j] = HILO_U64(*offset, *(offset + 1));
			j++;
		}
	}
}

static int bnx2x_phys_id(struct net_device *dev, u32 data)
{
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int i;

	if (!netif_running(dev))
		return 0;

	if (!bp->port.pmf)
		return 0;

	if (data == 0)
		data = 2;

	for (i = 0; i < (data * 2); i++) {
		if ((i % 2) == 0)
			bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);
		else
			bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
				      bp->link_params.hw_led_mode,
				      bp->link_params.chip_id);

		msleep_interruptible(500);
		if (signal_pending(current))
			break;
	}

	if (bp->link_vars.link_up)
		bnx2x_set_led(bp, port, LED_MODE_OPER,
			      bp->link_vars.line_speed,
			      bp->link_params.hw_led_mode,
			      bp->link_params.chip_id);

	return 0;
}

static struct ethtool_ops bnx2x_ethtool_ops = {
	.get_settings		= bnx2x_get_settings,
	.set_settings		= bnx2x_set_settings,
	.get_drvinfo		= bnx2x_get_drvinfo,
	.get_wol		= bnx2x_get_wol,
	.set_wol		= bnx2x_set_wol,
	.get_msglevel		= bnx2x_get_msglevel,
	.set_msglevel		= bnx2x_set_msglevel,
	.nway_reset		= bnx2x_nway_reset,
	.get_link		= ethtool_op_get_link,
	.get_eeprom_len		= bnx2x_get_eeprom_len,
	.get_eeprom		= bnx2x_get_eeprom,
	.set_eeprom		= bnx2x_set_eeprom,
	.get_coalesce		= bnx2x_get_coalesce,
	.set_coalesce		= bnx2x_set_coalesce,
	.get_ringparam		= bnx2x_get_ringparam,
	.set_ringparam		= bnx2x_set_ringparam,
	.get_pauseparam		= bnx2x_get_pauseparam,
	.set_pauseparam		= bnx2x_set_pauseparam,
	.get_rx_csum		= bnx2x_get_rx_csum,
	.set_rx_csum		= bnx2x_set_rx_csum,
	.get_tx_csum		= ethtool_op_get_tx_csum,
	.set_tx_csum		= ethtool_op_set_tx_hw_csum,
	.set_flags		= bnx2x_set_flags,
	.get_flags		= ethtool_op_get_flags,
	.get_sg			= ethtool_op_get_sg,
	.set_sg			= ethtool_op_set_sg,
	.get_tso		= ethtool_op_get_tso,
	.set_tso		= bnx2x_set_tso,
	.self_test_count	= bnx2x_self_test_count,
	.self_test		= bnx2x_self_test,
	.get_strings		= bnx2x_get_strings,
	.phys_id		= bnx2x_phys_id,
	.get_stats_count	= bnx2x_get_stats_count,
	.get_ethtool_stats	= bnx2x_get_ethtool_stats,
};

/* end of ethtool_ops */

/****************************************************************************
* General service functions
****************************************************************************/

static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0:
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
				       PCI_PM_CTRL_PME_STATUS));

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);
		break;

	case PCI_D3hot:
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		pmcsr |= 3;	/* PMCSR power state field value for D3hot */

		if (bp->wol)
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;

		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		break;

	default:
		return -EINVAL;
	}
	return 0;
}

static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
{
	u16 rx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
	if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		rx_cons_sb++;
	return (fp->rx_comp_cons != rx_cons_sb);
}
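
/* The rx_cons_sb++ above accounts for the RCQ page layout: the last entry
 * of each page is a "next page" pointer rather than a real completion, so
 * when the consumer index from the status block lands on that slot it is
 * bumped past it before being compared against the driver's consumer. */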

/*
 * net_device service functions
 */

static int bnx2x_poll(struct napi_struct *napi, int budget)
{
	struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
						 napi);
	struct bnx2x *bp = fp->bp;
	int work_done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		goto poll_panic;
#endif

	prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
	prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
	prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);

	bnx2x_update_fpsb_idx(fp);

	if (bnx2x_has_tx_work(fp))
		bnx2x_tx_int(fp, budget);

	if (bnx2x_has_rx_work(fp))
		work_done = bnx2x_rx_int(fp, budget);
	rmb(); /* BNX2X_HAS_WORK() reads the status block */

	/* must not complete if we consumed full budget */
	if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {

#ifdef BNX2X_STOP_ON_ERROR
poll_panic:
#endif
		napi_complete(napi);

		bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}
	return work_done;
}


/* we split the first BD into header and data BDs
 * to ease the pain of our fellow microcode engineers;
 * we use one mapping for both BDs.
 * So far this has only been observed to happen
 * in Other Operating Systems(TM)
 */
static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
				   struct bnx2x_fastpath *fp,
				   struct eth_tx_bd **tx_bd, u16 hlen,
				   u16 bd_prod, int nbd)
{
	struct eth_tx_bd *h_tx_bd = *tx_bd;
	struct eth_tx_bd *d_tx_bd;
	dma_addr_t mapping;
	int old_len = le16_to_cpu(h_tx_bd->nbytes);

	/* first fix first BD */
	h_tx_bd->nbd = cpu_to_le16(nbd);
	h_tx_bd->nbytes = cpu_to_le16(hlen);

	DP(NETIF_MSG_TX_QUEUED,	"TSO split header size is %d "
	   "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
	   h_tx_bd->addr_lo, h_tx_bd->nbd);

	/* now get a new data BD
	 * (after the pbd) and fill it */
	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
	d_tx_bd = &fp->tx_desc_ring[bd_prod];

	mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
			   le32_to_cpu(h_tx_bd->addr_lo)) + hlen;

	d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
	d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
	d_tx_bd->vlan = 0;
	/* this marks the BD as one that has no individual mapping
	 * the FW ignores this flag in a BD not marked start
	 */
	d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
	DP(NETIF_MSG_TX_QUEUED,
	   "TSO split data size is %d (%x:%x)\n",
	   d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);

	/* update tx_bd for marking the last BD flag */
	*tx_bd = d_tx_bd;

	return bd_prod;
}
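
/* In short: the header BD is trimmed down to hlen bytes and a new data BD
 * is chained right behind it, pointing into the very same DMA mapping at
 * offset hlen - no second pci_map_single() is needed for the split. */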

static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
{
	if (fix > 0)
		csum = (u16) ~csum_fold(csum_sub(csum,
				csum_partial(t_header - fix, fix, 0)));

	else if (fix < 0)
		csum = (u16) ~csum_fold(csum_add(csum,
				csum_partial(t_header, -fix, 0)));

	return swab16(csum);
}
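
/* Reasoning behind the fixup: the partial checksum the stack handed us may
 * have been started "fix" bytes away from the transport header the HW
 * expects. A positive fix means the stack's sum covers fix extra bytes
 * before the transport header, so their partial sum is subtracted out; a
 * negative fix means those bytes are missing and their partial sum is
 * added back in. */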

static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
{
	u32 rc;

	if (skb->ip_summed != CHECKSUM_PARTIAL)
		rc = XMIT_PLAIN;

	else {
		if (skb->protocol == ntohs(ETH_P_IPV6)) {
			rc = XMIT_CSUM_V6;
			if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;

		} else {
			rc = XMIT_CSUM_V4;
			if (ip_hdr(skb)->protocol == IPPROTO_TCP)
				rc |= XMIT_CSUM_TCP;
		}
	}

	if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
		rc |= XMIT_GSO_V4;

	else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
		rc |= XMIT_GSO_V6;

	return rc;
}
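
/* Example classification: a TSO packet over IPv4 has CHECKSUM_PARTIAL set
 * and SKB_GSO_TCPV4 in gso_type, so it comes out as
 * XMIT_CSUM_V4 | XMIT_CSUM_TCP | XMIT_GSO_V4; a frame with no offloads
 * requested is simply XMIT_PLAIN. */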

#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
/* check if packet requires linearization (packet is too fragmented) */
static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
			     u32 xmit_type)
{
	int to_copy = 0;
	int hlen = 0;
	int first_bd_sz = 0;

	/* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
	if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {

		if (xmit_type & XMIT_GSO) {
			unsigned short lso_mss = skb_shinfo(skb)->gso_size;
			/* Check if LSO packet needs to be copied:
			   3 = 1 (for headers BD) + 2 (for PBD and last BD) */
			int wnd_size = MAX_FETCH_BD - 3;
			/* Number of windows to check */
			int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
			int wnd_idx = 0;
			int frag_idx = 0;
			u32 wnd_sum = 0;

			/* Headers length */
			hlen = (int)(skb_transport_header(skb) - skb->data) +
				tcp_hdrlen(skb);

			/* Amount of data (w/o headers) on linear part of SKB*/
			first_bd_sz = skb_headlen(skb) - hlen;

			wnd_sum  = first_bd_sz;

			/* Calculate the first sum - it's special */
			for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
				wnd_sum +=
					skb_shinfo(skb)->frags[frag_idx].size;

			/* If there was data on linear skb data - check it */
			if (first_bd_sz > 0) {
				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					goto exit_lbl;
				}

				wnd_sum -= first_bd_sz;
			}

			/* Others are easier: run through the frag list and
			   check all windows */
			for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
				wnd_sum +=
				    skb_shinfo(skb)->frags[wnd_idx +
							   wnd_size - 1].size;

				if (unlikely(wnd_sum < lso_mss)) {
					to_copy = 1;
					break;
				}
				wnd_sum -=
					skb_shinfo(skb)->frags[wnd_idx].size;
			}

		} else {
			/* a non-LSO packet that is too fragmented
			   must always be linearized */
			to_copy = 1;
		}
	}

exit_lbl:
	if (unlikely(to_copy))
		DP(NETIF_MSG_TX_QUEUED,
		   "Linearization IS REQUIRED for %s packet. "
		   "num_frags %d  hlen %d  first_bd_sz %d\n",
		   (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
		   skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);

	return to_copy;
}
#endif
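
/* The sliding-window rule being enforced above: the chip fetches at most
 * MAX_FETCH_BD BDs per transmitted frame, so every window of
 * (MAX_FETCH_BD - 3) consecutive data BDs must hold at least one MSS worth
 * of payload, or the FW could stall mid-segment. For instance, assuming
 * MAX_FETCH_BD is 13, any 10 consecutive fragments summing to less than
 * the MSS force the skb to be linearized. */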
755735eb
EG
9965
9966/* called with netif_tx_lock
a2fbb9ea 9967 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 9968 * netif_wake_queue()
a2fbb9ea
ET
9969 */
9970static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
9971{
9972 struct bnx2x *bp = netdev_priv(dev);
9973 struct bnx2x_fastpath *fp;
555f6c78 9974 struct netdev_queue *txq;
a2fbb9ea
ET
9975 struct sw_tx_bd *tx_buf;
9976 struct eth_tx_bd *tx_bd;
9977 struct eth_tx_parse_bd *pbd = NULL;
9978 u16 pkt_prod, bd_prod;
755735eb 9979 int nbd, fp_index;
a2fbb9ea 9980 dma_addr_t mapping;
755735eb
EG
9981 u32 xmit_type = bnx2x_xmit_type(bp, skb);
9982 int vlan_off = (bp->e1hov ? 4 : 0);
9983 int i;
9984 u8 hlen = 0;
a2fbb9ea
ET
9985
9986#ifdef BNX2X_STOP_ON_ERROR
9987 if (unlikely(bp->panic))
9988 return NETDEV_TX_BUSY;
9989#endif
9990
555f6c78
EG
9991 fp_index = skb_get_queue_mapping(skb);
9992 txq = netdev_get_tx_queue(dev, fp_index);
9993
a2fbb9ea 9994 fp = &bp->fp[fp_index];
755735eb 9995
231fd58a 9996 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 9997 fp->eth_q_stats.driver_xoff++,
555f6c78 9998 netif_tx_stop_queue(txq);
a2fbb9ea
ET
9999 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10000 return NETDEV_TX_BUSY;
10001 }
10002
755735eb
EG
10003 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10004 " gso type %x xmit_type %x\n",
10005 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10006 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10007
632da4d6 10008#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 10009 /* First, check if we need to linearize the skb
755735eb
EG
10010 (due to FW restrictions) */
10011 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10012 /* Statistics of linearization */
10013 bp->lin_cnt++;
10014 if (skb_linearize(skb) != 0) {
10015 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10016 "silently dropping this SKB\n");
10017 dev_kfree_skb_any(skb);
da5a662a 10018 return NETDEV_TX_OK;
755735eb
EG
10019 }
10020 }
632da4d6 10021#endif
755735eb 10022
a2fbb9ea 10023 /*
755735eb 10024 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10025 then for TSO or xsum we have a parsing info BD,
755735eb 10026 and only then we have the rest of the TSO BDs.
a2fbb9ea
ET
10027 (don't forget to mark the last one as last,
10028 and to unmap only AFTER you write to the BD ...)
755735eb 10029 And above all, all pdb sizes are in words - NOT DWORDS!
a2fbb9ea
ET
10030 */
10031
10032 pkt_prod = fp->tx_pkt_prod++;
755735eb 10033 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10034
755735eb 10035 /* get a tx_buf and first BD */
a2fbb9ea
ET
10036 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10037 tx_bd = &fp->tx_desc_ring[bd_prod];
10038
10039 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10040 tx_bd->general_data = (UNICAST_ADDRESS <<
10041 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
3196a88a
EG
10042 /* header nbd */
10043 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10044
755735eb
EG
10045 /* remember the first BD of the packet */
10046 tx_buf->first_bd = fp->tx_bd_prod;
10047 tx_buf->skb = skb;
a2fbb9ea
ET
10048
10049 DP(NETIF_MSG_TX_QUEUED,
10050 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10051 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10052
0c6671b0
EG
10053#ifdef BCM_VLAN
10054 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10055 (bp->flags & HW_VLAN_TX_FLAG)) {
755735eb
EG
10056 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10057 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10058 vlan_off += 4;
10059 } else
0c6671b0 10060#endif
755735eb 10061 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10062
755735eb 10063 if (xmit_type) {
755735eb 10064 /* turn on parsing and get a BD */
a2fbb9ea
ET
10065 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10066 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10067
10068 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10069 }
10070
10071 if (xmit_type & XMIT_CSUM) {
10072 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10073
10074 /* for now NS flag is not used in Linux */
755735eb 10075 pbd->global_data = (hlen |
96fc1784 10076 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 10077 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10078
755735eb
EG
10079 pbd->ip_hlen = (skb_transport_header(skb) -
10080 skb_network_header(skb)) / 2;
10081
10082 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10083
755735eb
EG
10084 pbd->total_hlen = cpu_to_le16(hlen);
10085 hlen = hlen*2 - vlan_off;
a2fbb9ea 10086
755735eb
EG
10087 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10088
10089 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10090 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10091 ETH_TX_BD_FLAGS_IP_CSUM;
10092 else
10093 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10094
10095 if (xmit_type & XMIT_CSUM_TCP) {
10096 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10097
10098 } else {
10099 s8 fix = SKB_CS_OFF(skb); /* signed! */
10100
a2fbb9ea 10101 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10102 pbd->cs_offset = fix / 2;
a2fbb9ea 10103
755735eb
EG
10104 DP(NETIF_MSG_TX_QUEUED,
10105 "hlen %d offset %d fix %d csum before fix %x\n",
10106 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10107 SKB_CS(skb));
10108
10109 /* HW bug: fixup the CSUM */
10110 pbd->tcp_pseudo_csum =
10111 bnx2x_csum_fix(skb_transport_header(skb),
10112 SKB_CS(skb), fix);
10113
10114 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10115 pbd->tcp_pseudo_csum);
10116 }
a2fbb9ea
ET
10117 }
10118
10119 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10120 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10121
10122 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10123 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10124 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10125 tx_bd->nbd = cpu_to_le16(nbd);
10126 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10127
10128 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10129 " nbytes %d flags %x vlan %x\n",
10130 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10131 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10132 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10133
755735eb 10134 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10135
10136 DP(NETIF_MSG_TX_QUEUED,
10137 "TSO packet len %d hlen %d total len %d tso size %d\n",
10138 skb->len, hlen, skb_headlen(skb),
10139 skb_shinfo(skb)->gso_size);
10140
10141 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10142
755735eb
EG
10143 if (unlikely(skb_headlen(skb) > hlen))
10144 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10145 bd_prod, ++nbd);
a2fbb9ea
ET
10146
10147 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10148 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10149 pbd->tcp_flags = pbd_tcp_flags(skb);
10150
10151 if (xmit_type & XMIT_GSO_V4) {
10152 pbd->ip_id = swab16(ip_hdr(skb)->id);
10153 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10154 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10155 ip_hdr(skb)->daddr,
10156 0, IPPROTO_TCP, 0));
755735eb
EG
10157
10158 } else
10159 pbd->tcp_pseudo_csum =
10160 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10161 &ipv6_hdr(skb)->daddr,
10162 0, IPPROTO_TCP, 0));
10163
a2fbb9ea
ET
10164 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10165 }
10166
755735eb
EG
10167 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10168 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
a2fbb9ea 10169
755735eb
EG
10170 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10171 tx_bd = &fp->tx_desc_ring[bd_prod];
a2fbb9ea 10172
755735eb
EG
10173 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10174 frag->size, PCI_DMA_TODEVICE);
a2fbb9ea 10175
755735eb
EG
10176 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10177 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10178 tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d  bd @%p  addr (%x:%x)  nbytes %d  flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p  flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
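	/* e.g. when the packet's BDs cross a ring-page boundary, the
	 * page-chaining BD at the end of the page is presumably consumed
	 * as well and has to be counted here
	 */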
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p  ip_data %x  ip_hlen %u  ip_id %u  lso_mss %u"
		   "  tcp_flags %x  xsum %x  seq %u  hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d  bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

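			/* program one CAM entry per multicast address; each
			 * MAC is stored as three byte-swapped 16-bit words
			 */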
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				   0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

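			/* 256-bit hash filter; e.g. a MAC whose crc32c is
			 * 0x63xxxxxx maps to bit 0x63 = 99: regidx = 99 >> 5
			 * = 3, so mc_filter[3] gets bit (99 & 0x1f) = 3 set
			 */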
			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation because the actual
	 * alloc size is only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};


static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

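	/* prefer a 64-bit DMA mask and remember it via USING_DAC_FLAG (used
	 * later to set NETIF_F_HIGHDMA); otherwise fall back to 32-bit
	 */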
	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	bp->common.name = board_info[ent->driver_data].name;
	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, bp->common.name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
			bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

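	/* the shared-memory block is expected to sit in the 0xA0000-0xC0000
	 * window; anything else presumably means the MCP is not running
	 */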
	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);