/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x_reg.h"
#include "bnx2x_fw_defs.h"
#include "bnx2x_hsi.h"
#include "bnx2x_link.h"
#include "bnx2x.h"
#include "bnx2x_init.h"

#define DRV_MODULE_VERSION	"1.45.26"
#define DRV_MODULE_RELDATE	"2009/01/26"
#define BNX2X_BC_VER		0x040200

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)

static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);

static int multi_mode = 1;
module_param(multi_mode, int, 0);

static int disable_tpa;
static int poll;
static int debug;
static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

module_param(disable_tpa, int, 0);

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

module_param(poll, int, 0);
module_param(debug, int, 0);
MODULE_PARM_DESC(disable_tpa, "disable the TPA (LRO) feature");
MODULE_PARM_DESC(poll, "use polling (for debug)");
MODULE_PARM_DESC(debug, "default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711 = 1,
	BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};


static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}

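/*
 * Note: the two helpers above are the classic "window register" pattern.
 * PCICFG_GRC_ADDRESS selects a GRC target through PCI config space and
 * PCICFG_GRC_DATA moves the data, so device registers stay reachable even
 * before the memory BAR (or the DMAE engine) is usable.  Pointing the
 * window back at PCICFG_VENDOR_ID_OFFSET afterwards presumably keeps a
 * stray config-space access from hitting an arbitrary GRC address.
 */
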
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}

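/*
 * Mailbox layout assumed by bnx2x_post_dmae(): DMAE_REG_CMD_MEM is an
 * array of struct dmae_command slots, one per DMAE channel, and
 * dmae_reg_go_c[] lists the 16 matching per-channel doorbells.  Writing 1
 * to a doorbell makes the engine execute whatever was just copied into
 * slot idx.
 */
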
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}

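/*
 * Completion handshake used by both DMAE wrappers: the command carries the
 * PCI address of a write-back word plus a magic completion value.  The
 * driver clears the word, posts the command and polls until the engine
 * writes DMAE_COMP_VAL back; a bounded count (with longer sleeps on
 * emulation/FPGA) stands in for an interrupt-driven completion.
 */
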
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "dmae: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("dmae timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}

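/*
 * Layout behind bnx2x_mc_assert(): each of the four STORM processors
 * (X/T/C/U) keeps an assert list in its internal memory.  Every entry is
 * four dwords (an opcode plus three parameters), the LIST_INDEX register
 * counts the entries firmware has filled, and an entry whose first dword
 * is still COMMON_ASM_INVALID_ASSERT_OPCODE terminates the walk.
 */
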
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	u32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n" KERN_ERR, mark);

	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk("\n" KERN_ERR PFX "end of fw dump\n");
}

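/*
 * Reading bnx2x_fw_dump(): the MCP keeps a circular ASCII log in its
 * scratchpad, with the dword at offset 0xf104 as the current write mark
 * (stored as a GRC address, hence the -0x08000000 rebase).  The first loop
 * prints the older half from the mark to the end of the buffer, the second
 * the newer half from the start up to the mark, 32 bytes at a time with
 * data[8] serving as the NUL terminator.
 */
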
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("queue[%d]: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_c_idx(%x) *sb_c_idx(%x) fp_u_idx(%x)"
			  " *sb_u_idx(%x) bd data(%x,%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge, fp->fp_c_idx,
			  fp->status_blk->c_status_block.status_block_index,
			  fp->fp_u_idx,
			  fp->status_blk->u_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j < end; j++) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("packet[%x]=[%p,%x]\n", j,
				  sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j < end; j++) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("tx_bd[%x]=[%x:%x:%x:%x]\n",
				  j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j < end; j++) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j < end; j++) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j < end; j++) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("cqe[%x]=[%x:%x:%x:%x]\n",
				  j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}

static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}
}

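/*
 * Rough map of the HC_CONFIG bits toggled above: INT_LINE_EN drives legacy
 * INTx, MSI_MSIX_INT_EN drives message-signalled interrupts, SINGLE_ISR_EN
 * collapses everything onto one vector, and ATTN_BIT_EN lets slowpath
 * attentions through.  In the INTx case the value is written twice, first
 * with the MSI/MSI-X enable still set and then with it cleared, presumably
 * to step the block through a safe mode transition.
 */
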
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}

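/*
 * Teardown ordering sketch: intr_sem acts as a software gate (every ISR
 * bails out while it is non-zero), bnx2x_int_disable() then shuts the gate
 * in hardware, synchronize_irq() waits for handlers already in flight, and
 * finally the slowpath work is cancelled and flushed.  Each step only makes
 * sense once the previous one is complete.
 */
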
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));
}

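/*
 * The ack above packs the whole IGU handshake into a single 32-bit write:
 * which status block is acknowledged, which storm's index, the new index
 * value, whether to update it, and the interrupt enable/disable op.  One
 * register write keeps the ack atomic with respect to the IGU.
 */
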
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}


/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}

/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}

static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}

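/*
 * Accounting note: the TX ring is built from NUM_TX_RINGS pages and the
 * last descriptor of each page is a "next-page" pointer rather than a real
 * buffer.  Counting all of those as permanently "used" gives a cheap upper
 * bound and spares the hot path from computing how many page boundaries
 * lie between cons and prod.
 */
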
static void bnx2x_tx_int(struct bnx2x_fastpath *fp, int work)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;
	int done = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
		done++;

		if (done == work)
			break;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* Need to make the tx_bd_cons update visible to start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}

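/*
 * Wake-up race, in brief: start_xmit() stops the queue when the ring looks
 * full and this completion path is the only thing that wakes it.  The
 * smp_mb() above pairs with the producer side so that either start_xmit()
 * sees the new tx_bd_cons or this path sees the stopped queue; re-checking
 * the conditions under the tx lock closes the remaining window.
 */
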
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   FP_IDX(fp), cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (FP_IDX(fp)) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;


	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}

static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}

static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}

/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod;
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}

static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}

static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}

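/*
 * How the sge_mask bookkeeping appears to work: one bit per SGE entry,
 * grouped into 64-bit elements, where a set bit means "free".  Completions
 * clear bits (possibly out of order), and the producer only advances across
 * elements that have gone fully to zero, refilling each one to all-ones as
 * it passes; that is what the delta loop above does.
 */
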
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}

static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}

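/*
 * TPA bin lifecycle, roughly: on TPA_START the skb that just completed on
 * the RX ring is parked in tpa_pool[queue] (still DMA-mapped) while a
 * fresh skb from the pool takes over its ring slot, letting firmware keep
 * aggregating into the parked buffer.  The TPA_END path (bnx2x_tpa_stop()
 * below) unmaps it, attaches the SGE pages and hands the aggregated packet
 * to the stack.
 */
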
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}

static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}


		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}

static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), FP_CL_ID(fp)) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}

static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   FP_IDX(fp), hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return -EINVAL;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}

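/*
 * bnx2x_rx_int() is the NAPI poll body for one fastpath: it walks the
 * completion queue until it catches up with the status-block index or
 * exhausts the budget, and only then publishes the new BD/CQE/SGE
 * producers to the chip in one shot via bnx2x_update_rx_prod().
 */
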
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = FP_IDX(fp);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, FP_SB_ID(fp));
	bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}

static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status %u\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}


	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

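/*
 * Bit layout assumed by the INTx/MSI handler above: bnx2x_ack_int() reads
 * the SIMD mask register, which acks the interrupt and returns a bitmap of
 * asserted status blocks; bit 0 appears to cover the default (slowpath)
 * status block and bit (1 + sb_id) each fastpath one, hence the
 * "0x2 << sb_id" mask and the 0x1 check that kicks the slowpath work.
 */
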
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */

static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds, polling every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}

static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
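
/*
 * Usage sketch (illustrative, not part of the driver): writes to
 * hw_lock_control_reg + 4 set the lock bit and writes to the base
 * register clear it, so a caller brackets a critical section as
 *
 *	if (bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO) == 0) {
 *		... touch the shared resource ...
 *		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
 *	}
 */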

/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	mutex_lock(&bp->port.phy_mutex);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

	if ((ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072) ||
	    (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073))
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_8072_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}

int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

	return value;
}
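
/*
 * Worked example of the swap arithmetic above (derived from the code, no
 * register values assumed): gpio_port is the physical port after the
 * optional swap, so with both NIG_REG_PORT_SWAP and NIG_REG_STRAP_OVERRIDE
 * non-zero, port 1 yields gpio_port 0 and GPIO 2 is sampled at shift 2;
 * without the swap the same pin sits at shift
 * 2 + MISC_REGISTERS_GPIO_PORT_SHIFT.
 */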

int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}

static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}

static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;
	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}

static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}

static u8 bnx2x_initial_phy_init(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);
		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (bp->link_vars.link_up)
			bnx2x_link_report(bp);

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - not initializing link\n");
	return -EINVAL;
}

static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not setting link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - not resetting link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}

static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur.
	   The 1.25 coefficient makes the threshold a little bigger than
	   the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
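
/*
 * Worked numbers for the code above (assuming RS_PERIODIC_TIMEOUT_USEC is
 * 100, which is what the "100 usec ... = 25" comment implies): on a 10G
 * link, line_speed = 10000 Mbps gives r_param = 1250 bytes/usec, so
 * rs_threshold = 100 * 1250 * 5/4 = 156250 bytes; the 10G/1G t_fair values
 * quoted in the comment follow directly from t_fair = T_FAIR_COEF /
 * line_speed.
 */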

static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
{
	struct rate_shaping_vars_per_vn m_rs_vn;
	struct fairness_vars_per_vn m_fair_vn;
	u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
	u16 vn_min_rate, vn_max_rate;
	int i;

	/* If function is hidden - set min and max to zeroes */
	if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
		vn_min_rate = 0;
		vn_max_rate = 0;

	} else {
		vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
		/* If fairness is enabled (not all min rates are zeroes) and
		   if current min rate is zero - set it to 1.
		   This is a requirement of the algorithm. */
		if (bp->vn_weight_sum && (vn_min_rate == 0))
			vn_min_rate = DEF_MIN_RATE;
		vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
	}

	DP(NETIF_MSG_IFUP,
	   "func %d: vn_min_rate=%d  vn_max_rate=%d  vn_weight_sum=%d\n",
	   func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);

	memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
	memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));

	/* global vn counter - maximal Mbps for this vn */
	m_rs_vn.vn_counter.rate = vn_max_rate;

	/* quota - number of bytes transmitted in this period */
	m_rs_vn.vn_counter.quota =
				(vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;

	if (bp->vn_weight_sum) {
		/* credit for each period of the fairness algorithm:
		   number of bytes in T_FAIR (the vn share the port rate).
		   vn_weight_sum should not be larger than 10000, thus
		   T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
		   than zero */
		m_fair_vn.vn_credit_delta =
			max((u32)(vn_min_rate * (T_FAIR_COEF /
						 (8 * bp->vn_weight_sum))),
			    (u32)(bp->cmng.fair_vars.fair_threshold * 2));
		DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
		   m_fair_vn.vn_credit_delta);
	}

	/* Store it to internal memory */
	for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_rs_vn))[i]);

	for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
		REG_WR(bp, BAR_XSTRORM_INTMEM +
		       XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
		       ((u32 *)(&m_fair_vn))[i]);
}
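
/*
 * Example of the bandwidth arithmetic above (illustrative; the field value
 * 25 is arbitrary): a MIN/MAX_BW field of n maps to n * 100 Mbps, so a
 * configured max of 25 gives vn_max_rate = 2500 Mbps and, with a 100 usec
 * shaping period, a per-period quota of 2500 * 100 / 8 = 31250 bytes.
 */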

/* This function is called upon link interrupt */
static void bnx2x_link_attn(struct bnx2x *bp)
{
	/* Make sure that we are synced with the current statistics */
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	bnx2x_link_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up) {

		/* dropless flow control */
		if (CHIP_IS_E1H(bp)) {
			int port = BP_PORT(bp);
			u32 pause_enabled = 0;

			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
				pause_enabled = 1;

			REG_WR(bp, BAR_USTRORM_INTMEM +
			       USTORM_PAUSE_ENABLED_OFFSET(port),
			       pause_enabled);
		}

		if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
			struct host_port_stats *pstats;

			pstats = bnx2x_sp(bp, port_stats);
			/* reset old bmac stats */
			memset(&(pstats->mac_stx[0]), 0,
			       sizeof(struct mac_stx));
		}
		if ((bp->state == BNX2X_STATE_OPEN) ||
		    (bp->state == BNX2X_STATE_DISABLED))
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	}

	/* indicate link status */
	bnx2x_link_report(bp);

	if (IS_E1HMF(bp)) {
		int port = BP_PORT(bp);
		int func;
		int vn;

		for (vn = VN_0; vn < E1HVN_MAX; vn++) {
			if (vn == BP_E1HVN(bp))
				continue;

			func = ((vn << 1) | port);

			/* Set the attention towards other drivers
			   on the same port */
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
			       (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
		}

		if (bp->link_vars.link_up) {
			int i;

			/* Init rate shaping and fairness contexts */
			bnx2x_init_port_minmax(bp);

			for (vn = VN_0; vn < E1HVN_MAX; vn++)
				bnx2x_init_vn_minmax(bp, 2*vn + port);

			/* Store it to internal memory */
			for (i = 0;
			     i < sizeof(struct cmng_struct_per_port) / 4; i++)
				REG_WR(bp, BAR_XSTRORM_INTMEM +
				  XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
				       ((u32 *)(&bp->cmng))[i]);
		}
	}
}

static void bnx2x__link_status_update(struct bnx2x *bp)
{
	if (bp->state != BNX2X_STATE_OPEN)
		return;

	bnx2x_link_status_update(&bp->link_params, &bp->link_vars);

	if (bp->link_vars.link_up)
		bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
	else
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* indicate link status */
	bnx2x_link_report(bp);
}

static void bnx2x_pmf_update(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	bp->port.pmf = 1;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* enable nig attention */
	val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);

	bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

/* end of Link */

/* slow path */

/*
 * General service functions
 */

/* the slow path queue is odd since completions arrive on the fastpath ring */
static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
			 u32 data_hi, u32 data_lo, int common)
{
	int func = BP_FUNC(bp);

	DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
	   "SPQE (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
	   (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
	   (void *)bp->spq_prod_bd - (void *)bp->spq), command,
	   HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EIO;
#endif

	spin_lock_bh(&bp->spq_lock);

	if (!bp->spq_left) {
		BNX2X_ERR("BUG! SPQ ring full!\n");
		spin_unlock_bh(&bp->spq_lock);
		bnx2x_panic();
		return -EBUSY;
	}

	/* CID needs port number to be encoded in it */
	bp->spq_prod_bd->hdr.conn_and_cmd_data =
			cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
				     HW_CID(bp, cid)));
	bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
	if (common)
		bp->spq_prod_bd->hdr.type |=
			cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));

	bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
	bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);

	bp->spq_left--;

	if (bp->spq_prod_bd == bp->spq_last_bd) {
		bp->spq_prod_bd = bp->spq;
		bp->spq_prod_idx = 0;
		DP(NETIF_MSG_TIMER, "end of spq\n");

	} else {
		bp->spq_prod_bd++;
		bp->spq_prod_idx++;
	}

	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);

	spin_unlock_bh(&bp->spq_lock);
	return 0;
}
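
/*
 * For a concrete caller, see bnx2x_storm_stats_post() further down: it
 * posts RAMROD_CMD_ID_ETH_STAT_QUERY with the two halves of its ramrod
 * data as data_hi/data_lo and common = 0.
 */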

/* acquire split MCP access lock register */
static int bnx2x_acquire_alr(struct bnx2x *bp)
{
	u32 i, j, val;
	int rc = 0;

	might_sleep();
	i = 100;
	for (j = 0; j < i*10; j++) {
		val = (1UL << 31);
		REG_WR(bp, GRCBASE_MCP + 0x9c, val);
		val = REG_RD(bp, GRCBASE_MCP + 0x9c);
		if (val & (1L << 31))
			break;

		msleep(5);
	}
	if (!(val & (1L << 31))) {
		BNX2X_ERR("Cannot acquire MCP access lock register\n");
		rc = -EBUSY;
	}

	return rc;
}
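
/*
 * The handshake above: writing bit 31 to GRCBASE_MCP + 0x9c requests the
 * lock, and reading the bit back as set means this side owns it; with up
 * to 1000 attempts spaced 5 ms apart, the acquire gives up after roughly
 * 5 seconds.  Release (below) simply writes the register back to zero.
 */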

/* release split MCP access lock register */
static void bnx2x_release_alr(struct bnx2x *bp)
{
	u32 val = 0;

	REG_WR(bp, GRCBASE_MCP + 0x9c, val);
}

static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
{
	struct host_def_status_block *def_sb = bp->def_status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
		bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
		rc |= 1;
	}
	if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
		bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
		rc |= 2;
	}
	if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
		bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
		rc |= 4;
	}
	if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
		bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
		rc |= 8;
	}
	if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
		bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
		rc |= 16;
	}
	return rc;
}
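
/*
 * The returned bitmap encodes which default-SB indices moved: bit 0 for
 * the attention block and bits 1-4 for the C/U/X/T storm blocks
 * respectively.  bnx2x_sp_task() below only keys off bit 0 (attentions);
 * the remaining bits are informational in that path.
 */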

/*
 * slow path service functions
 */

static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
{
	int port = BP_PORT(bp);
	u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
		       COMMAND_REG_ATTN_BITS_SET);
	u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			      MISC_REG_AEU_MASK_ATTN_FUNC_0;
	u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
				       NIG_REG_MASK_INTERRUPT_PORT0;
	u32 aeu_mask;
	u32 nig_mask = 0;

	if (bp->attn_state & asserted)
		BNX2X_ERR("IGU ERROR\n");

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, aeu_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
	   aeu_mask, asserted);
	aeu_mask &= ~(asserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, aeu_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state |= asserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);

	if (asserted & ATTN_HARD_WIRED_MASK) {
		if (asserted & ATTN_NIG_FOR_FUNC) {

			bnx2x_acquire_phy_lock(bp);

			/* save nig interrupt mask */
			nig_mask = REG_RD(bp, nig_int_mask_addr);
			REG_WR(bp, nig_int_mask_addr, 0);

			bnx2x_link_attn(bp);

			/* handle unicore attn? */
		}
		if (asserted & ATTN_SW_TIMER_4_FUNC)
			DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");

		if (asserted & GPIO_2_FUNC)
			DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");

		if (asserted & GPIO_3_FUNC)
			DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");

		if (asserted & GPIO_4_FUNC)
			DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");

		if (port == 0) {
			if (asserted & ATTN_GENERAL_ATTN_1) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_2) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_3) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
			}
		} else {
			if (asserted & ATTN_GENERAL_ATTN_4) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_5) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
			}
			if (asserted & ATTN_GENERAL_ATTN_6) {
				DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
				REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
			}
		}

	} /* if hardwired */

	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   asserted, hc_addr);
	REG_WR(bp, hc_addr, asserted);

	/* now set back the mask */
	if (asserted & ATTN_NIG_FOR_FUNC) {
		REG_WR(bp, nig_int_mask_addr, nig_mask);
		bnx2x_release_phy_lock(bp);
	}
}

static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
{
	int port = BP_PORT(bp);
	int reg_offset;
	u32 val;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {

		val = REG_RD(bp, reg_offset);
		val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("SPIO5 hw attention\n");

		switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			/* Fan failure attention */

			/* The PHY reset is controlled by GPIO 1 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* Low power mode is controlled by GPIO 2 */
			bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
				       MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
			/* mark the failure */
			bp->link_params.ext_phy_config &=
					~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
			bp->link_params.ext_phy_config |=
					PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
			SHMEM_WR(bp,
				 dev_info.port_hw_config[port].
							external_phy_config,
				 bp->link_params.ext_phy_config);
			/* log the failure */
			printk(KERN_ERR PFX "Fan Failure on Network"
			       " Controller %s has caused the driver to"
			       " shutdown the card to prevent permanent"
			       " damage. Please contact Dell Support for"
			       " assistance\n", bp->dev->name);
			break;

		default:
			break;
		}
	}

	if (attn & HW_INTERRUT_ASSERT_SET_0) {

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_0));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & BNX2X_DOORQ_ASSERT) {

		val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
		BNX2X_ERR("DB hw attention 0x%x\n", val);
		/* DORQ discard attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from DORQ\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_1) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_1));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {

		val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
		BNX2X_ERR("CFC hw attention 0x%x\n", val);
		/* CFC error attention */
		if (val & 0x2)
			BNX2X_ERR("FATAL error from CFC\n");
	}

	if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {

		val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
		BNX2X_ERR("PXP hw attention 0x%x\n", val);
		/* RQ_USDMDP_FIFO_OVERFLOW */
		if (val & 0x18000)
			BNX2X_ERR("FATAL error from PXP\n");
	}

	if (attn & HW_INTERRUT_ASSERT_SET_2) {

		int port = BP_PORT(bp);
		int reg_offset;

		reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
				     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);

		val = REG_RD(bp, reg_offset);
		val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
		REG_WR(bp, reg_offset, val);

		BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
			  (attn & HW_INTERRUT_ASSERT_SET_2));
		bnx2x_panic();
	}
}

static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
{
	u32 val;

	if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {

		if (attn & BNX2X_PMF_LINK_ASSERT) {
			int func = BP_FUNC(bp);

			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
			bnx2x__link_status_update(bp);
			if (SHMEM_RD(bp, func_mb[func].drv_status) &
							DRV_STATUS_PMF)
				bnx2x_pmf_update(bp);

		} else if (attn & BNX2X_MC_ASSERT_BITS) {

			BNX2X_ERR("MC assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
			bnx2x_panic();

		} else if (attn & BNX2X_MCP_ASSERT) {

			BNX2X_ERR("MCP assert!\n");
			REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
			bnx2x_fw_dump(bp);

		} else
			BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
	}

	if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
		BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
		if (attn & BNX2X_GRC_TIMEOUT) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
			BNX2X_ERR("GRC time-out 0x%08x\n", val);
		}
		if (attn & BNX2X_GRC_RSV) {
			val = CHIP_IS_E1H(bp) ?
				REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
			BNX2X_ERR("GRC reserved 0x%08x\n", val);
		}
		REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
	}
}

static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
{
	struct attn_route attn;
	struct attn_route group_mask;
	int port = BP_PORT(bp);
	int index;
	u32 reg_addr;
	u32 val;
	u32 aeu_mask;

	/* need to take HW lock because MCP or other port might also
	   try to handle this event */
	bnx2x_acquire_alr(bp);

	attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
	attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
	attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
	attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
	DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
	   attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		if (deasserted & (1 << index)) {
			group_mask = bp->attn_group[index];

			DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
			   index, group_mask.sig[0], group_mask.sig[1],
			   group_mask.sig[2], group_mask.sig[3]);

			bnx2x_attn_int_deasserted3(bp,
					attn.sig[3] & group_mask.sig[3]);
			bnx2x_attn_int_deasserted1(bp,
					attn.sig[1] & group_mask.sig[1]);
			bnx2x_attn_int_deasserted2(bp,
					attn.sig[2] & group_mask.sig[2]);
			bnx2x_attn_int_deasserted0(bp,
					attn.sig[0] & group_mask.sig[0]);

			if ((attn.sig[0] & group_mask.sig[0] &
						HW_PRTY_ASSERT_SET_0) ||
			    (attn.sig[1] & group_mask.sig[1] &
						HW_PRTY_ASSERT_SET_1) ||
			    (attn.sig[2] & group_mask.sig[2] &
						HW_PRTY_ASSERT_SET_2))
				BNX2X_ERR("FATAL HW block parity attention\n");
		}
	}

	bnx2x_release_alr(bp);

	reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);

	val = ~deasserted;
	DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
	   val, reg_addr);
	REG_WR(bp, reg_addr, val);

	if (~bp->attn_state & deasserted)
		BNX2X_ERR("IGU ERROR\n");

	reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
			  MISC_REG_AEU_MASK_ATTN_FUNC_0;

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
	aeu_mask = REG_RD(bp, reg_addr);

	DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
	   aeu_mask, deasserted);
	aeu_mask |= (deasserted & 0xff);
	DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);

	REG_WR(bp, reg_addr, aeu_mask);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);

	DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
	bp->attn_state &= ~deasserted;
	DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
}

static void bnx2x_attn_int(struct bnx2x *bp)
{
	/* read local copy of bits */
	u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits);
	u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
								attn_bits_ack);
	u32 attn_state = bp->attn_state;

	/* look for changed bits */
	u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
	u32 deasserted = ~attn_bits &  attn_ack &  attn_state;

	DP(NETIF_MSG_HW,
	   "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
	   attn_bits, attn_ack, asserted, deasserted);

	if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
		BNX2X_ERR("BAD attention state\n");

	/* handle bits that were raised */
	if (asserted)
		bnx2x_attn_int_asserted(bp, asserted);

	if (deasserted)
		bnx2x_attn_int_deasserted(bp, deasserted);
}
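
/*
 * The edge detection above in words: a bit is "asserted" when it is newly
 * raised (set in attn_bits, clear in both attn_ack and attn_state) and
 * "deasserted" when it is newly dropped (clear in attn_bits, set in both
 * attn_ack and attn_state); the sanity check fires on bits where
 * attn_bits and attn_ack agree while disagreeing with the driver's
 * recorded state.
 */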

static void bnx2x_sp_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
	u16 status;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return;
	}

	status = bnx2x_update_dsb_idx(bp);
/*	if (status == 0)				     */
/*		BNX2X_ERR("spurious slowpath interrupt!\n"); */

	DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);

	/* HW attentions */
	if (status & 0x1)
		bnx2x_attn_int(bp);

	bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
		     IGU_INT_NOP, 1);
	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
		     IGU_INT_ENABLE, 1);
}

static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
{
	struct net_device *dev = dev_instance;
	struct bnx2x *bp = netdev_priv(dev);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

	return IRQ_HANDLED;
}

/* end of slow path */

/* Statistics */

/****************************************************************************
* Macros
****************************************************************************/

/* sum[hi:lo] += add[hi:lo] */
#define ADD_64(s_hi, a_hi, s_lo, a_lo) \
	do { \
		s_lo += a_lo; \
		s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
	} while (0)

/* difference = minuend - subtrahend */
#define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
	do { \
		if (m_lo < s_lo) { \
			/* underflow */ \
			d_hi = m_hi - s_hi; \
			if (d_hi > 0) { \
				/* we can 'loan' 1 */ \
				d_hi--; \
				d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
			} else { \
				/* m_hi <= s_hi */ \
				d_hi = 0; \
				d_lo = 0; \
			} \
		} else { \
			/* m_lo >= s_lo */ \
			if (m_hi < s_hi) { \
				d_hi = 0; \
				d_lo = 0; \
			} else { \
				/* m_hi >= s_hi */ \
				d_hi = m_hi - s_hi; \
				d_lo = m_lo - s_lo; \
			} \
		} \
	} while (0)
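
/*
 * Worked example for DIFF_64 (illustrative): with minuend
 * 0x00000001:00000000 and subtrahend 0x00000000:00000001, m_lo < s_lo
 * triggers the borrow branch: d_hi = 1 - 0 - 1 = 0 and
 * d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff, i.e. the correct 64-bit
 * difference.  Both "impossible" branches clamp to zero rather than
 * letting a counter go negative.
 */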

#define UPDATE_STAT64(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
			diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
		pstats->mac_stx[0].t##_hi = new->s##_hi; \
		pstats->mac_stx[0].t##_lo = new->s##_lo; \
		ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
		       pstats->mac_stx[1].t##_lo, diff.lo); \
	} while (0)

#define UPDATE_STAT64_NIG(s, t) \
	do { \
		DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
			diff.lo, new->s##_lo, old->s##_lo); \
		ADD_64(estats->t##_hi, diff.hi, \
		       estats->t##_lo, diff.lo); \
	} while (0)

/* sum[hi:lo] += add */
#define ADD_EXTEND_64(s_hi, s_lo, a) \
	do { \
		s_lo += a; \
		s_hi += (s_lo < a) ? 1 : 0; \
	} while (0)

#define UPDATE_EXTEND_STAT(s) \
	do { \
		ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
			      pstats->mac_stx[1].s##_lo, \
			      new->s); \
	} while (0)

#define UPDATE_EXTEND_TSTAT(s, t) \
	do { \
		diff = le32_to_cpu(tclient->s) - old_tclient->s; \
		old_tclient->s = le32_to_cpu(tclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		old_uclient->s = uclient->s; \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

#define UPDATE_EXTEND_XSTAT(s, t) \
	do { \
		diff = le32_to_cpu(xclient->s) - old_xclient->s; \
		old_xclient->s = le32_to_cpu(xclient->s); \
		ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/* minuend -= subtrahend */
#define SUB_64(m_hi, s_hi, m_lo, s_lo) \
	do { \
		DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
	} while (0)

/* minuend[hi:lo] -= subtrahend */
#define SUB_EXTEND_64(m_hi, m_lo, s) \
	do { \
		SUB_64(m_hi, 0, m_lo, s); \
	} while (0)

#define SUB_EXTEND_USTAT(s, t) \
	do { \
		diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
		SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
	} while (0)

/*
 * General service functions
 */

static inline long bnx2x_hilo(u32 *hiref)
{
	u32 lo = *(hiref + 1);
#if (BITS_PER_LONG == 64)
	u32 hi = *hiref;

	return HILO_U64(hi, lo);
#else
	return lo;
#endif
}
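
/*
 * Note on bnx2x_hilo(): hiref points at a {hi, lo} pair of u32s, so on
 * 64-bit builds it returns the full HILO_U64(hi, lo) value, while on
 * 32-bit builds the high dword is deliberately dropped to fit the long
 * return type.
 */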

/*
 * Init service functions
 */

static void bnx2x_storm_stats_post(struct bnx2x *bp)
{
	if (!bp->stats_pending) {
		struct eth_query_ramrod_data ramrod_data = {0};
		int i, rc;

		ramrod_data.drv_counter = bp->stats_counter++;
		ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
		for_each_queue(bp, i)
			ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);

		rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
				   ((u32 *)&ramrod_data)[1],
				   ((u32 *)&ramrod_data)[0], 0);
		if (rc == 0) {
			/* stats ramrod has its own slot on the spq */
			bp->spq_left++;
			bp->stats_pending = 1;
		}
	}
}

static void bnx2x_stats_init(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	bp->stats_pending = 0;
	bp->executer_idx = 0;
	bp->stats_counter = 0;

	/* port stats */
	if (!BP_NOMCP(bp))
		bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
	else
		bp->port.port_stx = 0;
	DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);

	memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
	bp->port.old_nig_stats.brb_discard =
			REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
	bp->port.old_nig_stats.brb_truncate =
			REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
	REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
		    &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);

	/* function stats */
	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		memset(&fp->old_tclient, 0,
		       sizeof(struct tstorm_per_client_stats));
		memset(&fp->old_uclient, 0,
		       sizeof(struct ustorm_per_client_stats));
		memset(&fp->old_xclient, 0,
		       sizeof(struct xstorm_per_client_stats));
		memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
	}

	memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
	memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));

	bp->stats_state = STATS_STATE_DISABLED;
	if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
		bnx2x_stats_handle(bp, STATS_EVENT_PMF);
}

static void bnx2x_hw_stats_post(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	*stats_comp = DMAE_COMP_VAL;
	if (CHIP_REV_IS_SLOW(bp))
		return;

	/* loader */
	if (bp->executer_idx) {
		int loader_idx = PMF_DMAE_C(bp);

		memset(dmae, 0, sizeof(struct dmae_command));

		dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
				DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
				DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
				DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
				DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
				(BP_PORT(bp) ? DMAE_CMD_PORT_1 :
					       DMAE_CMD_PORT_0) |
				(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
		dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
				     sizeof(struct dmae_command) *
				     (loader_idx + 1)) >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct dmae_command) >> 2;
		if (CHIP_IS_E1(bp))
			dmae->len--;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, loader_idx);

	} else if (bp->func_stx) {
		*stats_comp = 0;
		bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
	}
}
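
/*
 * How the loader block above chains the queued commands: the loader DMAE
 * copies the first queued dmae_command from host memory into DMAE command
 * memory slot loader_idx + 1, and its completion address is that slot's
 * GO register (dmae_reg_go_c[loader_idx + 1]), so finishing the copy
 * automatically kicks off the command that was just loaded.
 */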

static int bnx2x_stats_comp(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);
	int cnt = 10;

	might_sleep();
	while (*stats_comp != DMAE_COMP_VAL) {
		if (!cnt) {
			BNX2X_ERR("timeout waiting for stats finished\n");
			break;
		}
		cnt--;
		msleep(1);
	}
	return 1;
}

/*
 * Statistics service functions
 */

static void bnx2x_stats_pmf_update(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
	dmae->src_addr_lo = bp->port.port_stx >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
	dmae->len = DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
	dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
				   DMAE_LEN32_RD_MAX * 4);
	dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
	bnx2x_hw_stats_post(bp);
	bnx2x_stats_comp(bp);
}

static void bnx2x_port_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	int port = BP_PORT(bp);
	int vn = BP_E1HVN(bp);
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 mac_addr;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->link_vars.link_up || !bp->port.pmf) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;

	/* MCP */
	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* MAC */
	opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
		  DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (vn << DMAE_CMD_E1HVN_SHIFT));

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {

		mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
				   NIG_REG_INGRESS_BMAC0_MEM);

		/* BIGMAC_REGISTER_TX_STAT_GTPKT ..
		   BIGMAC_REGISTER_TX_STAT_GTBYT */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
			     BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* BIGMAC_REGISTER_RX_STAT_GR64 ..
		   BIGMAC_REGISTER_RX_STAT_GRIPJ */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
				offsetof(struct bmac_stats, rx_stat_gr64_lo));
		dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
			     BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

	} else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {

		mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);

		/* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
		dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_RX_STAT_AC_28 */
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
		     offsetof(struct emac_stats, rx_stat_falsecarriererrors));
		dmae->len = 1;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;

		/* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = opcode;
		dmae->src_addr_lo = (mac_addr +
				     EMAC_REG_EMAC_TX_STAT_AC) >> 2;
		dmae->src_addr_hi = 0;
		dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
			offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
		dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
		dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
		dmae->comp_addr_hi = 0;
		dmae->comp_val = 1;
	}

	/* NIG */
	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
				    NIG_REG_STAT0_BRB_DISCARD) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
	dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = opcode;
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt0_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
	dmae->comp_addr_hi = 0;
	dmae->comp_val = 1;

	dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(vn << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
				    NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
			offsetof(struct nig_stats, egress_mac_pkt1_lo));
	dmae->len = (2*sizeof(u32)) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}
3448
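/* A note on the DMAE command chaining used above: every command except
 * the last completes by writing 1 to the GRC "go" register of the
 * loader channel (comp_addr = dmae_reg_go_c[loader_idx]), which
 * presumably re-triggers the loader to issue the next command in the
 * chain.  Only the final command completes to host memory (stats_comp,
 * comp_val = DMAE_COMP_VAL), and that is the value bnx2x_stats_comp()
 * polls for.
 */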
static void bnx2x_func_stats_init(struct bnx2x *bp)
{
	struct dmae_command *dmae = &bp->stats_dmae;
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	/* sanity */
	if (!bp->func_stx) {
		BNX2X_ERR("BUG!\n");
		return;
	}

	bp->executer_idx = 0;
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
	dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
	dmae->dst_addr_lo = bp->func_stx >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = sizeof(struct host_func_stats) >> 2;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	*stats_comp = 0;
}

static void bnx2x_stats_start(struct bnx2x *bp)
{
	if (bp->port.pmf)
		bnx2x_port_stats_init(bp);

	else if (bp->func_stx)
		bnx2x_func_stats_init(bp);

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_stats_pmf_start(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_pmf_update(bp);
	bnx2x_stats_start(bp);
}

static void bnx2x_stats_restart(struct bnx2x *bp)
{
	bnx2x_stats_comp(bp);
	bnx2x_stats_start(bp);
}

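/* The MAC update helpers below fold the freshly DMAE'd hardware
 * counters into the host_port_stats mirror.  UPDATE_STAT64() and
 * UPDATE_EXTEND_STAT() accumulate the difference against the previous
 * snapshot into a hi/lo u32 pair, which effectively extends the
 * narrower hardware counters to 64 bits across wraparounds.
 */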
static void bnx2x_bmac_stats_update(struct bnx2x *bp)
{
	struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;

	UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
	UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
	UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
	UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
	UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
	UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
	UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
	UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
	UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
	UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
	UPDATE_STAT64(tx_stat_gt127,
				tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_STAT64(tx_stat_gt255,
				tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_STAT64(tx_stat_gt511,
				tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_STAT64(tx_stat_gt1023,
				tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_STAT64(tx_stat_gt1518,
				tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
	UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
	UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
	UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
	UPDATE_STAT64(tx_stat_gterr,
				tx_stat_dot3statsinternalmactransmiterrors);
	UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);

	estats->pause_frames_received_hi =
				pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
	estats->pause_frames_received_lo =
				pstats->mac_stx[1].rx_stat_bmac_xpf_lo;

	estats->pause_frames_sent_hi =
				pstats->mac_stx[1].tx_stat_outxoffsent_hi;
	estats->pause_frames_sent_lo =
				pstats->mac_stx[1].tx_stat_outxoffsent_lo;
}

static void bnx2x_emac_stats_update(struct bnx2x *bp)
{
	struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;

	UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
	UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
	UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
	UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
	UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
	UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
	UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
	UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
	UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
	UPDATE_EXTEND_STAT(tx_stat_outxonsent);
	UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
	UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
	UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
	UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
	UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
	UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);

	estats->pause_frames_received_hi =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
	estats->pause_frames_received_lo =
			pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
	ADD_64(estats->pause_frames_received_hi,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
	       estats->pause_frames_received_lo,
	       pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);

	estats->pause_frames_sent_hi =
			pstats->mac_stx[1].tx_stat_outxonsent_hi;
	estats->pause_frames_sent_lo =
			pstats->mac_stx[1].tx_stat_outxonsent_lo;
	ADD_64(estats->pause_frames_sent_hi,
	       pstats->mac_stx[1].tx_stat_outxoffsent_hi,
	       estats->pause_frames_sent_lo,
	       pstats->mac_stx[1].tx_stat_outxoffsent_lo);
}

static int bnx2x_hw_stats_update(struct bnx2x *bp)
{
	struct nig_stats *new = bnx2x_sp(bp, nig_stats);
	struct nig_stats *old = &(bp->port.old_nig_stats);
	struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct regpair diff;
	u32 nig_timer_max;

	if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
		bnx2x_bmac_stats_update(bp);

	else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
		bnx2x_emac_stats_update(bp);

	else { /* unreached */
		BNX2X_ERR("stats updated by dmae but no MAC active\n");
		return -1;
	}

	ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
		      new->brb_discard - old->brb_discard);
	ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
		      new->brb_truncate - old->brb_truncate);

	UPDATE_STAT64_NIG(egress_mac_pkt0,
					etherstatspkts1024octetsto1522octets);
	UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);

	memcpy(old, new, sizeof(struct nig_stats));

	memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
	       sizeof(struct mac_stx));
	estats->brb_drop_hi = pstats->brb_drop_hi;
	estats->brb_drop_lo = pstats->brb_drop_lo;

	pstats->host_port_stats_start = ++pstats->host_port_stats_end;

	nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
	if (nig_timer_max != estats->nig_timer_max) {
		estats->nig_timer_max = nig_timer_max;
		BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
	}

	return 0;
}

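/* Each storm bumps its per-client stats_counter when it posts a new
 * statistics snapshot.  A snapshot is accepted below only if that
 * counter is exactly one behind the driver's bp->stats_counter;
 * anything else means the storm has not yet caught up and the whole
 * update is rejected as stale.
 */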
static int bnx2x_storm_stats_update(struct bnx2x *bp)
{
	struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
	struct tstorm_per_port_stats *tport =
					&stats->tstorm_common.port_statistics;
	struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	memset(&(fstats->total_bytes_received_hi), 0,
	       sizeof(struct host_func_stats) - 2*sizeof(u32));
	estats->error_bytes_received_hi = 0;
	estats->error_bytes_received_lo = 0;
	estats->etherstatsoverrsizepkts_hi = 0;
	estats->etherstatsoverrsizepkts_lo = 0;
	estats->no_buff_discard_hi = 0;
	estats->no_buff_discard_lo = 0;

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		int cl_id = fp->cl_id;
		struct tstorm_per_client_stats *tclient =
				&stats->tstorm_common.client_statistics[cl_id];
		struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
		struct ustorm_per_client_stats *uclient =
				&stats->ustorm_common.client_statistics[cl_id];
		struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
		struct xstorm_per_client_stats *xclient =
				&stats->xstorm_common.client_statistics[cl_id];
		struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
		struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
		u32 diff;

		/* are storm stats valid? */
		if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
			   "  xstorm counter (%d) != stats_counter (%d)\n",
			   i, xclient->stats_counter, bp->stats_counter);
			return -1;
		}
		if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
			   "  tstorm counter (%d) != stats_counter (%d)\n",
			   i, tclient->stats_counter, bp->stats_counter);
			return -2;
		}
		if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
							bp->stats_counter) {
			DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
			   "  ustorm counter (%d) != stats_counter (%d)\n",
			   i, uclient->stats_counter, bp->stats_counter);
			return -4;
		}

		qstats->total_bytes_received_hi =
		qstats->valid_bytes_received_hi =
				le32_to_cpu(tclient->total_rcv_bytes.hi);
		qstats->total_bytes_received_lo =
		qstats->valid_bytes_received_lo =
				le32_to_cpu(tclient->total_rcv_bytes.lo);

		qstats->error_bytes_received_hi =
				le32_to_cpu(tclient->rcv_error_bytes.hi);
		qstats->error_bytes_received_lo =
				le32_to_cpu(tclient->rcv_error_bytes.lo);

		ADD_64(qstats->total_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       qstats->total_bytes_received_lo,
		       qstats->error_bytes_received_lo);

		UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
					total_unicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
					total_multicast_packets_received);
		UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_TSTAT(packets_too_big_discard,
					etherstatsoverrsizepkts);
		UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);

		SUB_EXTEND_USTAT(ucast_no_buff_pkts,
					total_unicast_packets_received);
		SUB_EXTEND_USTAT(mcast_no_buff_pkts,
					total_multicast_packets_received);
		SUB_EXTEND_USTAT(bcast_no_buff_pkts,
					total_broadcast_packets_received);
		UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
		UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);

		qstats->total_bytes_transmitted_hi =
				le32_to_cpu(xclient->total_sent_bytes.hi);
		qstats->total_bytes_transmitted_lo =
				le32_to_cpu(xclient->total_sent_bytes.lo);

		UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
					total_unicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
					total_multicast_packets_transmitted);
		UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
					total_broadcast_packets_transmitted);

		old_tclient->checksum_discard = tclient->checksum_discard;
		old_tclient->ttl0_discard = tclient->ttl0_discard;

		ADD_64(fstats->total_bytes_received_hi,
		       qstats->total_bytes_received_hi,
		       fstats->total_bytes_received_lo,
		       qstats->total_bytes_received_lo);
		ADD_64(fstats->total_bytes_transmitted_hi,
		       qstats->total_bytes_transmitted_hi,
		       fstats->total_bytes_transmitted_lo,
		       qstats->total_bytes_transmitted_lo);
		ADD_64(fstats->total_unicast_packets_received_hi,
		       qstats->total_unicast_packets_received_hi,
		       fstats->total_unicast_packets_received_lo,
		       qstats->total_unicast_packets_received_lo);
		ADD_64(fstats->total_multicast_packets_received_hi,
		       qstats->total_multicast_packets_received_hi,
		       fstats->total_multicast_packets_received_lo,
		       qstats->total_multicast_packets_received_lo);
		ADD_64(fstats->total_broadcast_packets_received_hi,
		       qstats->total_broadcast_packets_received_hi,
		       fstats->total_broadcast_packets_received_lo,
		       qstats->total_broadcast_packets_received_lo);
		ADD_64(fstats->total_unicast_packets_transmitted_hi,
		       qstats->total_unicast_packets_transmitted_hi,
		       fstats->total_unicast_packets_transmitted_lo,
		       qstats->total_unicast_packets_transmitted_lo);
		ADD_64(fstats->total_multicast_packets_transmitted_hi,
		       qstats->total_multicast_packets_transmitted_hi,
		       fstats->total_multicast_packets_transmitted_lo,
		       qstats->total_multicast_packets_transmitted_lo);
		ADD_64(fstats->total_broadcast_packets_transmitted_hi,
		       qstats->total_broadcast_packets_transmitted_hi,
		       fstats->total_broadcast_packets_transmitted_lo,
		       qstats->total_broadcast_packets_transmitted_lo);
		ADD_64(fstats->valid_bytes_received_hi,
		       qstats->valid_bytes_received_hi,
		       fstats->valid_bytes_received_lo,
		       qstats->valid_bytes_received_lo);

		ADD_64(estats->error_bytes_received_hi,
		       qstats->error_bytes_received_hi,
		       estats->error_bytes_received_lo,
		       qstats->error_bytes_received_lo);
		ADD_64(estats->etherstatsoverrsizepkts_hi,
		       qstats->etherstatsoverrsizepkts_hi,
		       estats->etherstatsoverrsizepkts_lo,
		       qstats->etherstatsoverrsizepkts_lo);
		ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
		       estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
	}

	ADD_64(fstats->total_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       fstats->total_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	memcpy(estats, &(fstats->total_bytes_received_hi),
	       sizeof(struct host_func_stats) - 2*sizeof(u32));

	ADD_64(estats->etherstatsoverrsizepkts_hi,
	       estats->rx_stat_dot3statsframestoolong_hi,
	       estats->etherstatsoverrsizepkts_lo,
	       estats->rx_stat_dot3statsframestoolong_lo);
	ADD_64(estats->error_bytes_received_hi,
	       estats->rx_stat_ifhcinbadoctets_hi,
	       estats->error_bytes_received_lo,
	       estats->rx_stat_ifhcinbadoctets_lo);

	if (bp->port.pmf) {
		estats->mac_filter_discard =
				le32_to_cpu(tport->mac_filter_discard);
		estats->xxoverflow_discard =
				le32_to_cpu(tport->xxoverflow_discard);
		estats->brb_truncate_discard =
				le32_to_cpu(tport->brb_truncate_discard);
		estats->mac_discard = le32_to_cpu(tport->mac_discard);
	}

	fstats->host_func_stats_start = ++fstats->host_func_stats_end;

	bp->stats_pending = 0;

	return 0;
}

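/* Driver counters are kept as hi/lo u32 pairs; bnx2x_hilo() collapses
 * such a pair into the single unsigned long that struct
 * net_device_stats expects, e.g.:
 *
 *	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
 */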
static void bnx2x_net_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	struct net_device_stats *nstats = &bp->dev->stats;
	int i;

	nstats->rx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_received_hi);

	nstats->tx_packets =
		bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
		bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);

	nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);

	nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);

	nstats->rx_dropped = estats->mac_discard;
	for_each_queue(bp, i)
		nstats->rx_dropped +=
			le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);

	nstats->tx_dropped = 0;

	nstats->multicast =
		bnx2x_hilo(&estats->total_multicast_packets_received_hi);

	nstats->collisions =
		bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);

	nstats->rx_length_errors =
		bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
		bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
	nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
				 bnx2x_hilo(&estats->brb_truncate_hi);
	nstats->rx_crc_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
	nstats->rx_frame_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
	nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
	nstats->rx_missed_errors = estats->xxoverflow_discard;

	nstats->rx_errors = nstats->rx_length_errors +
			    nstats->rx_over_errors +
			    nstats->rx_crc_errors +
			    nstats->rx_frame_errors +
			    nstats->rx_fifo_errors +
			    nstats->rx_missed_errors;

	nstats->tx_aborted_errors =
		bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
		bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
	nstats->tx_carrier_errors =
		bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
	nstats->tx_fifo_errors = 0;
	nstats->tx_heartbeat_errors = 0;
	nstats->tx_window_errors = 0;

	nstats->tx_errors = nstats->tx_aborted_errors +
			    nstats->tx_carrier_errors +
	    bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
}

static void bnx2x_drv_stats_update(struct bnx2x *bp)
{
	struct bnx2x_eth_stats *estats = &bp->eth_stats;
	int i;

	estats->driver_xoff = 0;
	estats->rx_err_discard_pkt = 0;
	estats->rx_skb_alloc_failed = 0;
	estats->hw_csum_err = 0;
	for_each_queue(bp, i) {
		struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;

		estats->driver_xoff += qstats->driver_xoff;
		estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
		estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
		estats->hw_csum_err += qstats->hw_csum_err;
	}
}

static void bnx2x_stats_update(struct bnx2x *bp)
{
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	if (*stats_comp != DMAE_COMP_VAL)
		return;

	if (bp->port.pmf)
		bnx2x_hw_stats_update(bp);

	if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
		BNX2X_ERR("storm stats were not updated 3 times in a row\n");
		bnx2x_panic();
		return;
	}

	bnx2x_net_stats_update(bp);
	bnx2x_drv_stats_update(bp);

	if (bp->msglevel & NETIF_MSG_TIMER) {
		struct tstorm_per_client_stats *old_tclient =
							&bp->fp->old_tclient;
		struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
		struct bnx2x_eth_stats *estats = &bp->eth_stats;
		struct net_device_stats *nstats = &bp->dev->stats;
		int i;

		printk(KERN_DEBUG "%s:\n", bp->dev->name);
		printk(KERN_DEBUG "  tx avail (%4x)  tx hc idx (%x)"
				  "  tx pkt (%lx)\n",
		       bnx2x_tx_avail(bp->fp),
		       le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
		printk(KERN_DEBUG "  rx usage (%4x)  rx hc idx (%x)"
				  "  rx pkt (%lx)\n",
		       (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
			     bp->fp->rx_comp_cons),
		       le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
		printk(KERN_DEBUG "  %s (Xoff events %u)  brb drops %u  "
				  "brb truncate %u\n",
		       (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
		       qstats->driver_xoff,
		       estats->brb_drop_lo, estats->brb_truncate_lo);
		printk(KERN_DEBUG "tstats: checksum_discard %u  "
			"packets_too_big_discard %lu  no_buff_discard %lu  "
			"mac_discard %u  mac_filter_discard %u  "
			"xxovrflow_discard %u  brb_truncate_discard %u  "
			"ttl0_discard %u\n",
		       old_tclient->checksum_discard,
		       bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
		       bnx2x_hilo(&qstats->no_buff_discard_hi),
		       estats->mac_discard, estats->mac_filter_discard,
		       estats->xxoverflow_discard,
		       estats->brb_truncate_discard,
		       old_tclient->ttl0_discard);

		for_each_queue(bp, i) {
			printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
			       bnx2x_fp(bp, i, tx_pkt),
			       bnx2x_fp(bp, i, rx_pkt),
			       bnx2x_fp(bp, i, rx_calls));
		}
	}

	bnx2x_hw_stats_post(bp);
	bnx2x_storm_stats_post(bp);
}

static void bnx2x_port_stats_stop(struct bnx2x *bp)
{
	struct dmae_command *dmae;
	u32 opcode;
	int loader_idx = PMF_DMAE_C(bp);
	u32 *stats_comp = bnx2x_sp(bp, stats_comp);

	bp->executer_idx = 0;

	opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
		  DMAE_CMD_C_ENABLE |
		  DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
		  DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
		  DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
		  (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
		  (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));

	if (bp->port.port_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		if (bp->func_stx)
			dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
		else
			dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
		dmae->dst_addr_lo = bp->port.port_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_port_stats) >> 2;
		if (bp->func_stx) {
			dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
			dmae->comp_addr_hi = 0;
			dmae->comp_val = 1;
		} else {
			dmae->comp_addr_lo =
				U64_LO(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_addr_hi =
				U64_HI(bnx2x_sp_mapping(bp, stats_comp));
			dmae->comp_val = DMAE_COMP_VAL;

			*stats_comp = 0;
		}
	}

	if (bp->func_stx) {

		dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
		dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
		dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
		dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
		dmae->dst_addr_lo = bp->func_stx >> 2;
		dmae->dst_addr_hi = 0;
		dmae->len = sizeof(struct host_func_stats) >> 2;
		dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
		dmae->comp_val = DMAE_COMP_VAL;

		*stats_comp = 0;
	}
}

static void bnx2x_stats_stop(struct bnx2x *bp)
{
	int update = 0;

	bnx2x_stats_comp(bp);

	if (bp->port.pmf)
		update = (bnx2x_hw_stats_update(bp) == 0);

	update |= (bnx2x_storm_stats_update(bp) == 0);

	if (update) {
		bnx2x_net_stats_update(bp);

		if (bp->port.pmf)
			bnx2x_port_stats_stop(bp);

		bnx2x_hw_stats_post(bp);
		bnx2x_stats_comp(bp);
	}
}

static void bnx2x_stats_do_nothing(struct bnx2x *bp)
{
}

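/* Statistics state machine: two states (DISABLED/ENABLED) by four
 * events (PMF, LINK_UP, UPDATE, STOP).  Each table cell names the
 * action to run and the state to move to; bnx2x_stats_handle() indexes
 * the table, runs the action and then commits the transition.
 */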
static const struct {
	void (*action)(struct bnx2x *bp);
	enum bnx2x_stats_state next_state;
} bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
/* state	event	*/
{
/* DISABLED	PMF	*/ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
/*		LINK_UP	*/ {bnx2x_stats_start,      STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
/*		STOP	*/ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
},
{
/* ENABLED	PMF	*/ {bnx2x_stats_pmf_start,  STATS_STATE_ENABLED},
/*		LINK_UP	*/ {bnx2x_stats_restart,    STATS_STATE_ENABLED},
/*		UPDATE	*/ {bnx2x_stats_update,     STATS_STATE_ENABLED},
/*		STOP	*/ {bnx2x_stats_stop,       STATS_STATE_DISABLED}
}
};

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
{
	enum bnx2x_stats_state state = bp->stats_state;

	bnx2x_stats_stm[state][event].action(bp);
	bp->stats_state = bnx2x_stats_stm[state][event].next_state;

	if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
		DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
		   state, event, bp->stats_state);
}

static void bnx2x_timer(unsigned long data)
{
	struct bnx2x *bp = (struct bnx2x *) data;

	if (!netif_running(bp->dev))
		return;

	if (atomic_read(&bp->intr_sem) != 0)
		goto timer_restart;

	if (poll) {
		struct bnx2x_fastpath *fp = &bp->fp[0];
		int rc;

		bnx2x_tx_int(fp, 1000);
		rc = bnx2x_rx_int(fp, 1000);
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);
		u32 drv_pulse;
		u32 mcp_pulse;

		++bp->fw_drv_pulse_wr_seq;
		bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
		/* TBD - add SYSTEM_TIME */
		drv_pulse = bp->fw_drv_pulse_wr_seq;
		SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);

		mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
			     MCP_PULSE_SEQ_MASK);
		/* The delta between driver pulse and mcp response
		 * should be 1 (before mcp response) or 0 (after mcp response)
		 */
		if ((drv_pulse != mcp_pulse) &&
		    (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
			/* someone lost a heartbeat... */
			BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
				  drv_pulse, mcp_pulse);
		}
	}

	if ((bp->state == BNX2X_STATE_OPEN) ||
	    (bp->state == BNX2X_STATE_DISABLED))
		bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);

timer_restart:
	mod_timer(&bp->timer, jiffies + bp->current_interval);
}

/* end of Statistics */

/* nic init */

/*
 * nic init service functions
 */

static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
{
	int port = BP_PORT(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct ustorm_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
			sizeof(struct cstorm_status_block)/4);
}

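/* A (non-default) host status block has a USTORM half and a CSTORM
 * half.  For each half, bnx2x_init_sb() points the storm at the host
 * DMA address of that section, binds the block to this function and
 * writes 1 to every *_HC_DISABLE register, i.e. starts with host
 * coalescing disabled on all indices; bnx2x_update_coalesce() later
 * re-enables the indices that are actually used.
 */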
static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
			  dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index;
	u64 section;

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    u_status_block);
	sb->u_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
		USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_status_block,
					    c_status_block);
	sb->c_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
		CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);

	for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

static void bnx2x_zero_def_sb(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	bnx2x_init_fill(bp, BAR_USTRORM_INTMEM +
			USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct ustorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_CSTRORM_INTMEM +
			CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct cstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_XSTRORM_INTMEM +
			XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct xstorm_def_status_block)/4);
	bnx2x_init_fill(bp, BAR_TSTRORM_INTMEM +
			TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
			sizeof(struct tstorm_def_status_block)/4);
}

static void bnx2x_init_def_sb(struct bnx2x *bp,
			      struct host_def_status_block *def_sb,
			      dma_addr_t mapping, int sb_id)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int index, val, reg_offset;
	u64 section;

	/* ATTN */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    atten_status_block);
	def_sb->atten_status_block.status_block_id = sb_id;

	bp->attn_state = 0;

	reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
			     MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);

	for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
		bp->attn_group[index].sig[0] = REG_RD(bp,
						      reg_offset + 0x10*index);
		bp->attn_group[index].sig[1] = REG_RD(bp,
					       reg_offset + 0x4 + 0x10*index);
		bp->attn_group[index].sig[2] = REG_RD(bp,
					       reg_offset + 0x8 + 0x10*index);
		bp->attn_group[index].sig[3] = REG_RD(bp,
					       reg_offset + 0xc + 0x10*index);
	}

	reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
			     HC_REG_ATTN_MSG0_ADDR_L);

	REG_WR(bp, reg_offset, U64_LO(section));
	REG_WR(bp, reg_offset + 4, U64_HI(section));

	reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);

	val = REG_RD(bp, reg_offset);
	val |= sb_id;
	REG_WR(bp, reg_offset, val);

	/* USTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    u_def_status_block);
	def_sb->u_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_USTRORM_INTMEM +
	       USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_USTRORM_INTMEM +
	       ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
		USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* CSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    c_def_status_block);
	def_sb->c_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_CSTRORM_INTMEM +
	       ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
		CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* TSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    t_def_status_block);
	def_sb->t_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_TSTRORM_INTMEM +
	       ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
		TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_TSTRORM_INTMEM +
			 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	/* XSTORM */
	section = ((u64)mapping) + offsetof(struct host_def_status_block,
					    x_def_status_block);
	def_sb->x_def_status_block.status_block_id = sb_id;

	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
	REG_WR(bp, BAR_XSTRORM_INTMEM +
	       ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
	       U64_HI(section));
	REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
		XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);

	for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
		REG_WR16(bp, BAR_XSTRORM_INTMEM +
			 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);

	bp->stats_pending = 0;
	bp->set_mac_pending = 0;

	bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
}

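/* Coalescing timeouts are programmed per status-block index.  rx_ticks
 * and tx_ticks are in microseconds; the division by 12 below matches
 * what appears to be a 12us granularity of the storm HC_TIMEOUT
 * registers.  A value of 0 ticks instead disables coalescing for that
 * index (the *_HC_DISABLE write).
 */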
static void bnx2x_update_coalesce(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int i;

	for_each_queue(bp, i) {
		int sb_id = bp->fp[i].sb_id;

		/* HC_INDEX_U_ETH_RX_CQ_CONS */
		REG_WR8(bp, BAR_USTRORM_INTMEM +
			USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    U_SB_ETH_RX_CQ_INDEX),
			bp->rx_ticks/12);
		REG_WR16(bp, BAR_USTRORM_INTMEM +
			 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     U_SB_ETH_RX_CQ_INDEX),
			 bp->rx_ticks ? 0 : 1);

		/* HC_INDEX_C_ETH_TX_CQ_CONS */
		REG_WR8(bp, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
						    C_SB_ETH_TX_CQ_INDEX),
			bp->tx_ticks/12);
		REG_WR16(bp, BAR_CSTRORM_INTMEM +
			 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
						     C_SB_ETH_TX_CQ_INDEX),
			 bp->tx_ticks ? 0 : 1);
	}
}

static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
				       struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++) {
		struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
		struct sk_buff *skb = rx_buf->skb;

		if (skb == NULL) {
			DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
			continue;
		}

		if (fp->tpa_state[i] == BNX2X_TPA_START)
			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size,
					 PCI_DMA_FROMDEVICE);

		dev_kfree_skb(skb);
		rx_buf->skb = NULL;
	}
}

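/* Each RX queue owns three rings: the BD ring (one descriptor per
 * skb), the SGE ring (scatter pages, used only with TPA) and the
 * completion (CQ) ring.  The tail of every page in a ring is reserved
 * for a "next page" pointer - two descriptor slots for the BD and SGE
 * rings, one CQE for the CQ ring - hence the *_CNT * i - 2 (or - 1)
 * indexing in the init loops below.
 */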
static void bnx2x_init_rx_rings(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
					      ETH_MAX_AGGREGATION_QUEUES_E1H;
	u16 ring_prod, cqe_ring_prod;
	int i, j;

	bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
	DP(NETIF_MSG_IFUP,
	   "mtu %d  rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);

	if (bp->flags & TPA_ENABLE_FLAG) {

		for_each_rx_queue(bp, j) {
			struct bnx2x_fastpath *fp = &bp->fp[j];

			for (i = 0; i < max_agg_queues; i++) {
				fp->tpa_pool[i].skb =
				   netdev_alloc_skb(bp->dev, bp->rx_buf_size);
				if (!fp->tpa_pool[i].skb) {
					BNX2X_ERR("Failed to allocate TPA "
						  "skb pool for queue[%d] - "
						  "disabling TPA on this "
						  "queue!\n", j);
					bnx2x_free_tpa_pool(bp, fp, i);
					fp->disable_tpa = 1;
					break;
				}
				pci_unmap_addr_set((struct sw_rx_bd *)
							&bp->fp->tpa_pool[i],
						   mapping, 0);
				fp->tpa_state[i] = BNX2X_TPA_STOP;
			}
		}
	}

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		fp->rx_bd_cons = 0;
		fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
		fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;

		/* "next page" elements initialization */
		/* SGE ring */
		for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
			struct eth_rx_sge *sge;

			sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
			sge->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
			sge->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_sge_mapping +
					BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
		}

		bnx2x_init_sge_ring_bit_mask(fp);

		/* RX BD ring */
		for (i = 1; i <= NUM_RX_RINGS; i++) {
			struct eth_rx_bd *rx_bd;

			rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
			rx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
			rx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
		}

		/* CQ ring */
		for (i = 1; i <= NUM_RCQ_RINGS; i++) {
			struct eth_rx_cqe_next_page *nextpg;

			nextpg = (struct eth_rx_cqe_next_page *)
				&fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
			nextpg->addr_hi =
				cpu_to_le32(U64_HI(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
			nextpg->addr_lo =
				cpu_to_le32(U64_LO(fp->rx_comp_mapping +
					   BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
		}

		/* Allocate SGEs and initialize the ring elements */
		for (i = 0, ring_prod = 0;
		     i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {

			if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx sges\n", i);
				BNX2X_ERR("disabling TPA for queue[%d]\n", j);
				/* Cleanup already allocated elements */
				bnx2x_free_rx_sge_range(bp, fp, ring_prod);
				bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
				fp->disable_tpa = 1;
				ring_prod = 0;
				break;
			}
			ring_prod = NEXT_SGE_IDX(ring_prod);
		}
		fp->rx_sge_prod = ring_prod;

		/* Allocate BDs and initialize BD ring */
		fp->rx_comp_cons = 0;
		cqe_ring_prod = ring_prod = 0;
		for (i = 0; i < bp->rx_ring_size; i++) {
			if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
				BNX2X_ERR("was only able to allocate "
					  "%d rx skbs on queue[%d]\n", i, j);
				fp->eth_q_stats.rx_skb_alloc_failed++;
				break;
			}
			ring_prod = NEXT_RX_IDX(ring_prod);
			cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
			WARN_ON(ring_prod <= i);
		}

		fp->rx_bd_prod = ring_prod;
		/* must not have more available CQEs than BDs */
		fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
				       cqe_ring_prod);
		fp->rx_pkt = fp->rx_calls = 0;

		/* Warning!
		 * This will generate an interrupt (to the TSTORM);
		 * it must only be done after the chip is initialized.
		 */
		bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
				     fp->rx_sge_prod);
		if (j != 0)
			continue;

		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
		       U64_LO(fp->rx_comp_mapping));
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
		       U64_HI(fp->rx_comp_mapping));
	}
}

static void bnx2x_init_tx_ring(struct bnx2x *bp)
{
	int i, j;

	for_each_tx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 1; i <= NUM_TX_RINGS; i++) {
			struct eth_tx_bd *tx_bd =
				&fp->tx_desc_ring[TX_DESC_CNT * i - 1];

			tx_bd->addr_hi =
				cpu_to_le32(U64_HI(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
			tx_bd->addr_lo =
				cpu_to_le32(U64_LO(fp->tx_desc_mapping +
					    BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
		}

		fp->tx_pkt_prod = 0;
		fp->tx_pkt_cons = 0;
		fp->tx_bd_prod = 0;
		fp->tx_bd_cons = 0;
		fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
		fp->tx_pkt = 0;
	}
}

static void bnx2x_init_sp_ring(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);

	spin_lock_init(&bp->spq_lock);

	bp->spq_left = MAX_SPQ_PENDING;
	bp->spq_prod_idx = 0;
	bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
	bp->spq_prod_bd = bp->spq;
	bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
	       U64_LO(bp->spq_mapping));
	REG_WR(bp,
	       XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
	       U64_HI(bp->spq_mapping));

	REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
	       bp->spq_prod_idx);
}

static void bnx2x_init_context(struct bnx2x *bp)
{
	int i;

	for_each_queue(bp, i) {
		struct eth_context *context = bnx2x_sp(bp, context[i].eth);
		struct bnx2x_fastpath *fp = &bp->fp[i];
		u8 cl_id = fp->cl_id;
		u8 sb_id = FP_SB_ID(fp);

		context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_RX_SB_INDEX_NUM;
		context->ustorm_st_context.common.clientId = FP_CL_ID(fp);
		context->ustorm_st_context.common.status_block_id = sb_id;
		context->ustorm_st_context.common.flags =
			(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
			 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
		context->ustorm_st_context.common.statistics_counter_id =
						cl_id;
		context->ustorm_st_context.common.mc_alignment_log_size =
						BNX2X_RX_ALIGN_SHIFT;
		context->ustorm_st_context.common.bd_buff_size =
						bp->rx_buf_size;
		context->ustorm_st_context.common.bd_page_base_hi =
						U64_HI(fp->rx_desc_mapping);
		context->ustorm_st_context.common.bd_page_base_lo =
						U64_LO(fp->rx_desc_mapping);
		if (!fp->disable_tpa) {
			context->ustorm_st_context.common.flags |=
				(USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
				 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
			context->ustorm_st_context.common.sge_buff_size =
				(u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
					 (u32)0xffff);
			context->ustorm_st_context.common.sge_page_base_hi =
						U64_HI(fp->rx_sge_mapping);
			context->ustorm_st_context.common.sge_page_base_lo =
						U64_LO(fp->rx_sge_mapping);
		}

		context->ustorm_ag_context.cdu_usage =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_UCM_AG,
					       ETH_CONNECTION_TYPE);

		context->xstorm_st_context.tx_bd_page_base_hi =
						U64_HI(fp->tx_desc_mapping);
		context->xstorm_st_context.tx_bd_page_base_lo =
						U64_LO(fp->tx_desc_mapping);
		context->xstorm_st_context.db_data_addr_hi =
						U64_HI(fp->tx_prods_mapping);
		context->xstorm_st_context.db_data_addr_lo =
						U64_LO(fp->tx_prods_mapping);
		context->xstorm_st_context.statistics_data = (fp->cl_id |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
		context->cstorm_st_context.sb_index_number =
						C_SB_ETH_TX_CQ_INDEX;
		context->cstorm_st_context.status_block_id = sb_id;

		context->xstorm_ag_context.cdu_reserved =
			CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
					       CDU_REGION_NUMBER_XCM_AG,
					       ETH_CONNECTION_TYPE);
	}
}

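/* RSS indirection: the table below has TSTORM_INDIRECTION_TABLE_SIZE
 * entries, each mapping a hash bucket to a client ID.  The round-robin
 * fill spreads the buckets evenly over the RX queues, with client IDs
 * starting at BP_CL_ID(bp).  With multi-queue disabled the table is
 * simply left untouched.
 */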
static void bnx2x_init_ind_table(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int i;

	if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
		return;

	DP(NETIF_MSG_IFUP,
	   "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
	for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
		REG_WR8(bp, BAR_TSTRORM_INTMEM +
			TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
			BP_CL_ID(bp) + (i % bp->num_rx_queues));
}

static void bnx2x_set_client_config(struct bnx2x *bp)
{
	struct tstorm_eth_client_config tstorm_client = {0};
	int port = BP_PORT(bp);
	int i;

	tstorm_client.mtu = bp->dev->mtu;
	tstorm_client.config_flags =
				(TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
				 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
#ifdef BCM_VLAN
	if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
		DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
	}
#endif

	if (bp->flags & TPA_ENABLE_FLAG) {
		tstorm_client.max_sges_for_packet =
			SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
		tstorm_client.max_sges_for_packet =
			((tstorm_client.max_sges_for_packet +
			  PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
			PAGES_PER_SGE_SHIFT;

		tstorm_client.config_flags |=
				TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
	}

	for_each_queue(bp, i) {
		tstorm_client.statistics_counter_id = bp->fp[i].cl_id;

		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
		       ((u32 *)&tstorm_client)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
		       ((u32 *)&tstorm_client)[1]);
	}

	DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
	   ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
}

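/* RX filtering is expressed as per-type bit masks in the TSTORM MAC
 * filter config; (1 << BP_L_ID(bp)) selects this function's bit.  Each
 * rx mode just decides which accept/drop masks get that bit - e.g.
 * promiscuous mode sets it in all three accept_all fields.
 */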
static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
{
	struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
	int mode = bp->rx_mode;
	int mask = (1 << BP_L_ID(bp));
	int func = BP_FUNC(bp);
	int i;

	DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);

	switch (mode) {
	case BNX2X_RX_MODE_NONE: /* no Rx */
		tstorm_mac_filter.ucast_drop_all = mask;
		tstorm_mac_filter.mcast_drop_all = mask;
		tstorm_mac_filter.bcast_drop_all = mask;
		break;
	case BNX2X_RX_MODE_NORMAL:
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_ALLMULTI:
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	case BNX2X_RX_MODE_PROMISC:
		tstorm_mac_filter.ucast_accept_all = mask;
		tstorm_mac_filter.mcast_accept_all = mask;
		tstorm_mac_filter.bcast_accept_all = mask;
		break;
	default:
		BNX2X_ERR("BAD rx mode (%d)\n", mode);
		break;
	}

	for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
		REG_WR(bp, BAR_TSTRORM_INTMEM +
		       TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
		       ((u32 *)&tstorm_mac_filter)[i]);

/*		DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
		   ((u32 *)&tstorm_mac_filter)[i]); */
	}

	if (mode != BNX2X_RX_MODE_NONE)
		bnx2x_set_client_config(bp);
}

static void bnx2x_init_internal_common(struct bnx2x *bp)
{
	int i;

	if (bp->flags & TPA_ENABLE_FLAG) {
		struct tstorm_eth_tpa_exist tpa = {0};

		tpa.tpa_exist = 1;

		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
		       ((u32 *)&tpa)[0]);
		REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
		       ((u32 *)&tpa)[1]);
	}

	/* Zero this manually as its initialization is
	   currently missing in the initTool */
	for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_AGG_DATA_OFFSET + i * 4, 0);
}

static void bnx2x_init_internal_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);

	REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
	REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero then those that are zeroes will be set to 1.
 */
static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
{
	int all_zero = 1;
	int port = BP_PORT(bp);
	int vn;

	bp->vn_weight_sum = 0;
	for (vn = VN_0; vn < E1HVN_MAX; vn++) {
		int func = 2*vn + port;
		u32 vn_cfg =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
		u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
				   FUNC_MF_CFG_MIN_BW_SHIFT) * 100;

		/* Skip hidden vns */
		if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
			continue;

		/* If min rate is zero - set it to 1 */
		if (!vn_min_rate)
			vn_min_rate = DEF_MIN_RATE;
		else
			all_zero = 0;

		bp->vn_weight_sum += vn_min_rate;
	}

	/* ... only if all min rates are zeros - disable fairness */
	if (all_zero)
		bp->vn_weight_sum = 0;
}

471de716 4870static void bnx2x_init_internal_func(struct bnx2x *bp)
a2fbb9ea 4871{
a2fbb9ea
ET
4872 struct tstorm_eth_function_common_config tstorm_config = {0};
4873 struct stats_indication_flags stats_flags = {0};
34f80b04
EG
4874 int port = BP_PORT(bp);
4875 int func = BP_FUNC(bp);
de832a55
EG
4876 int i, j;
4877 u32 offset;
471de716 4878 u16 max_agg_size;
a2fbb9ea
ET
4879
4880 if (is_multi(bp)) {
555f6c78 4881 tstorm_config.config_flags = MULTI_FLAGS(bp);
a2fbb9ea
ET
4882 tstorm_config.rss_result_mask = MULTI_MASK;
4883 }
8d9c5f34
EG
4884 if (IS_E1HMF(bp))
4885 tstorm_config.config_flags |=
4886 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
a2fbb9ea 4887
34f80b04
EG
4888 tstorm_config.leading_client_id = BP_L_ID(bp);
4889
a2fbb9ea 4890 REG_WR(bp, BAR_TSTRORM_INTMEM +
34f80b04 4891 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
a2fbb9ea
ET
4892 (*(u32 *)&tstorm_config));
4893
c14423fe 4894 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
a2fbb9ea
ET
4895 bnx2x_set_storm_rx_mode(bp);
4896
de832a55
EG
4897 for_each_queue(bp, i) {
4898 u8 cl_id = bp->fp[i].cl_id;
4899
4900 /* reset xstorm per client statistics */
4901 offset = BAR_XSTRORM_INTMEM +
4902 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4903 for (j = 0;
4904 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
4905 REG_WR(bp, offset + j*4, 0);
4906
4907 /* reset tstorm per client statistics */
4908 offset = BAR_TSTRORM_INTMEM +
4909 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4910 for (j = 0;
4911 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
4912 REG_WR(bp, offset + j*4, 0);
4913
4914 /* reset ustorm per client statistics */
4915 offset = BAR_USTRORM_INTMEM +
4916 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4917 for (j = 0;
4918 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
4919 REG_WR(bp, offset + j*4, 0);
66e855f3
YG
4920 }
4921
4922 /* Init statistics related context */
34f80b04 4923 stats_flags.collect_eth = 1;
a2fbb9ea 4924
66e855f3 4925 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4926 ((u32 *)&stats_flags)[0]);
66e855f3 4927 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4928 ((u32 *)&stats_flags)[1]);
4929
66e855f3 4930 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4931 ((u32 *)&stats_flags)[0]);
66e855f3 4932 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
a2fbb9ea
ET
4933 ((u32 *)&stats_flags)[1]);
4934
de832a55
EG
4935 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
4936 ((u32 *)&stats_flags)[0]);
4937 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
4938 ((u32 *)&stats_flags)[1]);
4939
66e855f3 4940 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
a2fbb9ea 4941 ((u32 *)&stats_flags)[0]);
66e855f3 4942 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
4943 ((u32 *)&stats_flags)[1]);
4944
4945 REG_WR(bp, BAR_XSTRORM_INTMEM +
4946 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4947 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4948 REG_WR(bp, BAR_XSTRORM_INTMEM +
4949 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4950 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4951
4952 REG_WR(bp, BAR_TSTRORM_INTMEM +
4953 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4954 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4955 REG_WR(bp, BAR_TSTRORM_INTMEM +
4956 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4957 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
34f80b04 4958
4959 REG_WR(bp, BAR_USTRORM_INTMEM +
4960 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
4961 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
4962 REG_WR(bp, BAR_USTRORM_INTMEM +
4963 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
4964 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
4965
4966 if (CHIP_IS_E1H(bp)) {
4967 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
4968 IS_E1HMF(bp));
4969 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
4970 IS_E1HMF(bp));
4971 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
4972 IS_E1HMF(bp));
4973 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
4974 IS_E1HMF(bp));
4975
4976 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
4977 bp->e1hov);
4978 }
4979
4980 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
4981 max_agg_size =
4982 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
4983 SGE_PAGE_SIZE * PAGES_PER_SGE),
4984 (u32)0xffff);
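/* Sketch of the clamp above (illustrative; assumes 4KB SGE pages and
 * PAGES_PER_SGE == 2): min(8, MAX_SKB_FRAGS) * 4096 * 2 = 65536,
 * which exceeds 0xffff and is therefore clamped - the aggregation
 * size lands in a 16-bit storm RAM field (note the REG_WR16 below).
 */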
555f6c78 4985 for_each_rx_queue(bp, i) {
7a9b2557 4986 struct bnx2x_fastpath *fp = &bp->fp[i];
4987
4988 REG_WR(bp, BAR_USTRORM_INTMEM +
4989 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)),
4990 U64_LO(fp->rx_comp_mapping));
4991 REG_WR(bp, BAR_USTRORM_INTMEM +
4992 USTORM_CQE_PAGE_BASE_OFFSET(port, FP_CL_ID(fp)) + 4,
4993 U64_HI(fp->rx_comp_mapping));
4994
4995 REG_WR16(bp, BAR_USTRORM_INTMEM +
4996 USTORM_MAX_AGG_SIZE_OFFSET(port, FP_CL_ID(fp)),
4997 max_agg_size);
4998 }
8a1c38d1 4999
5000 /* dropless flow control */
5001 if (CHIP_IS_E1H(bp)) {
5002 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5003
5004 rx_pause.bd_thr_low = 250;
5005 rx_pause.cqe_thr_low = 250;
5006 rx_pause.cos = 1;
5007 rx_pause.sge_thr_low = 0;
5008 rx_pause.bd_thr_high = 350;
5009 rx_pause.cqe_thr_high = 350;
5010 rx_pause.sge_thr_high = 0;
5011
5012 for_each_rx_queue(bp, i) {
5013 struct bnx2x_fastpath *fp = &bp->fp[i];
5014
5015 if (!fp->disable_tpa) {
5016 rx_pause.sge_thr_low = 150;
5017 rx_pause.sge_thr_high = 250;
5018 }
5019
5020
5021 offset = BAR_USTRORM_INTMEM +
5022 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5023 fp->cl_id);
5024 for (j = 0;
5025 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5026 j++)
5027 REG_WR(bp, offset + j*4,
5028 ((u32 *)&rx_pause)[j]);
5029 }
5030 }
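/* A reading of the thresholds above (an assumption from the field
 * names, not a documented spec): the *_thr_low/*_thr_high pairs are
 * free BD/CQE counts at which the chip asserts and then releases
 * pause for the client, and the SGE pair is only meaningful when TPA
 * is enabled on the queue - hence it stays 0 otherwise.
 */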
5031
5032 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5033
5034 /* Init rate shaping and fairness contexts */
5035 if (IS_E1HMF(bp)) {
5036 int vn;
5037
5038 /* During init there is no active link;
5039 until link is up, set the link rate to 10Gbps */
5040 bp->link_vars.line_speed = SPEED_10000;
5041 bnx2x_init_port_minmax(bp);
5042
5043 bnx2x_calc_vn_weight_sum(bp);
5044
5045 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5046 bnx2x_init_vn_minmax(bp, 2*vn + port);
5047
5048 /* Enable rate shaping and fairness */
5049 bp->cmng.flags.cmng_enables =
5050 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5051 if (bp->vn_weight_sum)
5052 bp->cmng.flags.cmng_enables |=
5053 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5054 else
5055 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5056 " fairness will be disabled\n");
5057 } else {
5058 /* rate shaping and fairness are disabled */
5059 DP(NETIF_MSG_IFUP,
5060 "single function mode - minmax will be disabled\n");
5061 }
5062
5063
5064 /* Store it to internal memory */
5065 if (bp->port.pmf)
5066 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5067 REG_WR(bp, BAR_XSTRORM_INTMEM +
5068 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5069 ((u32 *)(&bp->cmng))[i]);
5070}
5071
5072static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5073{
5074 switch (load_code) {
5075 case FW_MSG_CODE_DRV_LOAD_COMMON:
5076 bnx2x_init_internal_common(bp);
5077 /* no break */
5078
5079 case FW_MSG_CODE_DRV_LOAD_PORT:
5080 bnx2x_init_internal_port(bp);
5081 /* no break */
5082
5083 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5084 bnx2x_init_internal_func(bp);
5085 break;
5086
5087 default:
5088 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5089 break;
5090 }
5091}
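/* The missing breaks above are deliberate: a COMMON load also runs
 * the PORT and FUNCTION init stages, and a PORT load also runs the
 * FUNCTION stage, mirroring the scope of the load_code that the MCP
 * handed back to the driver.
 */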
5092
5093static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5094{
5095 int i;
5096
5097 for_each_queue(bp, i) {
5098 struct bnx2x_fastpath *fp = &bp->fp[i];
5099
34f80b04 5100 fp->bp = bp;
a2fbb9ea 5101 fp->state = BNX2X_FP_STATE_CLOSED;
a2fbb9ea 5102 fp->index = i;
5103 fp->cl_id = BP_L_ID(bp) + i;
5104 fp->sb_id = fp->cl_id;
5105 DP(NETIF_MSG_IFUP,
5106 "bnx2x_init_sb(%p,%p) index %d cl_id %d sb %d\n",
5107 bp, fp->status_blk, i, FP_CL_ID(fp), FP_SB_ID(fp));
5108 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5109 FP_SB_ID(fp));
5110 bnx2x_update_fpsb_idx(fp);
5111 }
5112
5113 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5114 DEF_SB_ID);
5115 bnx2x_update_dsb_idx(bp);
5116 bnx2x_update_coalesce(bp);
5117 bnx2x_init_rx_rings(bp);
5118 bnx2x_init_tx_ring(bp);
5119 bnx2x_init_sp_ring(bp);
5120 bnx2x_init_context(bp);
471de716 5121 bnx2x_init_internal(bp, load_code);
a2fbb9ea 5122 bnx2x_init_ind_table(bp);
5123 bnx2x_stats_init(bp);
5124
5125 /* At this point, we are ready for interrupts */
5126 atomic_set(&bp->intr_sem, 0);
5127
5128 /* flush all before enabling interrupts */
5129 mb();
5130 mmiowb();
5131
615f8fd9 5132 bnx2x_int_enable(bp);
5133}
5134
5135/* end of nic init */
5136
5137/*
5138 * gzip service functions
5139 */
5140
5141static int bnx2x_gunzip_init(struct bnx2x *bp)
5142{
5143 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5144 &bp->gunzip_mapping);
5145 if (bp->gunzip_buf == NULL)
5146 goto gunzip_nomem1;
5147
5148 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5149 if (bp->strm == NULL)
5150 goto gunzip_nomem2;
5151
5152 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5153 GFP_KERNEL);
5154 if (bp->strm->workspace == NULL)
5155 goto gunzip_nomem3;
5156
5157 return 0;
5158
5159gunzip_nomem3:
5160 kfree(bp->strm);
5161 bp->strm = NULL;
5162
5163gunzip_nomem2:
5164 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5165 bp->gunzip_mapping);
5166 bp->gunzip_buf = NULL;
5167
5168gunzip_nomem1:
5169 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
34f80b04 5170 " decompression\n", bp->dev->name);
5171 return -ENOMEM;
5172}
5173
5174static void bnx2x_gunzip_end(struct bnx2x *bp)
5175{
5176 kfree(bp->strm->workspace);
5177
5178 kfree(bp->strm);
5179 bp->strm = NULL;
5180
5181 if (bp->gunzip_buf) {
5182 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5183 bp->gunzip_mapping);
5184 bp->gunzip_buf = NULL;
5185 }
5186}
5187
5188static int bnx2x_gunzip(struct bnx2x *bp, u8 *zbuf, int len)
5189{
5190 int n, rc;
5191
5192 /* check gzip header */
5193 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED))
5194 return -EINVAL;
5195
5196 n = 10;
5197
34f80b04 5198#define FNAME 0x8
5199
5200 if (zbuf[3] & FNAME)
5201 while ((zbuf[n++] != 0) && (n < len));
5202
5203 bp->strm->next_in = zbuf + n;
5204 bp->strm->avail_in = len - n;
5205 bp->strm->next_out = bp->gunzip_buf;
5206 bp->strm->avail_out = FW_BUF_SIZE;
5207
5208 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5209 if (rc != Z_OK)
5210 return rc;
5211
5212 rc = zlib_inflate(bp->strm, Z_FINISH);
5213 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5214 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5215 bp->dev->name, bp->strm->msg);
5216
5217 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5218 if (bp->gunzip_outlen & 0x3)
5219 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5220 " gunzip_outlen (%d) not aligned\n",
5221 bp->dev->name, bp->gunzip_outlen);
5222 bp->gunzip_outlen >>= 2;
5223
5224 zlib_inflateEnd(bp->strm);
5225
5226 if (rc == Z_STREAM_END)
5227 return 0;
5228
5229 return rc;
5230}
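/* The header skipping above follows the gzip member layout (RFC 1952):
 * bytes 0-1 magic (0x1f 0x8b), byte 2 method (8 = deflate), byte 3 the
 * FLG bits, bytes 4-9 MTIME/XFL/OS - hence n starts at 10.  Only FNAME
 * is handled, so firmware images are assumed to be gzipped without the
 * FEXTRA/FCOMMENT/FHCRC fields; the -MAX_WBITS argument then tells
 * zlib to inflate a raw deflate stream with no zlib/gzip header.
 */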
5231
5232/* nic load/unload */
5233
5234/*
34f80b04 5235 * General service functions
5236 */
5237
5238/* send a NIG loopback debug packet */
5239static void bnx2x_lb_pckt(struct bnx2x *bp)
5240{
a2fbb9ea 5241 u32 wb_write[3];
5242
5243 /* Ethernet source and destination addresses */
5244 wb_write[0] = 0x55555555;
5245 wb_write[1] = 0x55555555;
34f80b04 5246 wb_write[2] = 0x20; /* SOP */
a2fbb9ea 5247 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5248
5249 /* NON-IP protocol */
5250 wb_write[0] = 0x09000000;
5251 wb_write[1] = 0x55555555;
34f80b04 5252 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
a2fbb9ea 5253 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5254}
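/* What the two DMAE writes above emit (a sketch - the flag encoding
 * is inferred from the SOP/EOP comments, not a documented layout):
 * each write pushes 8 bytes of frame data (wb_write[0-1]) plus a
 * control dword (wb_write[2]) into the NIG debug-packet interface,
 * 0x20 marking start-of-packet and 0x10 end-of-packet, so the two
 * calls together inject one 16-byte loopback frame.
 */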
5255
5256/* some of the internal memories
5257 * are not directly readable from the driver;
5258 * to test them we send debug packets
5259 */
5260static int bnx2x_int_mem_test(struct bnx2x *bp)
5261{
5262 int factor;
5263 int count, i;
5264 u32 val = 0;
5265
ad8d3948 5266 if (CHIP_REV_IS_FPGA(bp))
a2fbb9ea 5267 factor = 120;
5268 else if (CHIP_REV_IS_EMUL(bp))
5269 factor = 200;
5270 else
a2fbb9ea 5271 factor = 1;
5272
5273 DP(NETIF_MSG_HW, "start part1\n");
5274
5275 /* Disable inputs of parser neighbor blocks */
5276 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5277 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5278 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5279 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5280
5281 /* Write 0 to parser credits for CFC search request */
5282 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5283
5284 /* send Ethernet packet */
5285 bnx2x_lb_pckt(bp);
5286
5287 /* TODO: do we need to reset the NIG statistics? */
5288 /* Wait until NIG register shows 1 packet of size 0x10 */
5289 count = 1000 * factor;
5290 while (count) {
34f80b04 5291
5292 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5293 val = *bnx2x_sp(bp, wb_data[0]);
5294 if (val == 0x10)
5295 break;
5296
5297 msleep(10);
5298 count--;
5299 }
5300 if (val != 0x10) {
5301 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5302 return -1;
5303 }
5304
5305 /* Wait until PRS register shows 1 packet */
5306 count = 1000 * factor;
5307 while (count) {
5308 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5309 if (val == 1)
5310 break;
5311
5312 msleep(10);
5313 count--;
5314 }
5315 if (val != 0x1) {
5316 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5317 return -2;
5318 }
5319
5320 /* Reset and init BRB, PRS */
34f80b04 5321 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
a2fbb9ea 5322 msleep(50);
34f80b04 5323 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5324 msleep(50);
5325 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5326 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5327
5328 DP(NETIF_MSG_HW, "part2\n");
5329
5330 /* Disable inputs of parser neighbor blocks */
5331 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5332 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5333 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3196a88a 5334 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5335
5336 /* Write 0 to parser credits for CFC search request */
5337 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5338
5339 /* send 10 Ethernet packets */
5340 for (i = 0; i < 10; i++)
5341 bnx2x_lb_pckt(bp);
5342
5343 /* Wait until NIG register shows 10 + 1
5344 packets of size 11*0x10 = 0xb0 */
5345 count = 1000 * factor;
5346 while (count) {
34f80b04 5347
5348 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5349 val = *bnx2x_sp(bp, wb_data[0]);
5350 if (val == 0xb0)
5351 break;
5352
5353 msleep(10);
5354 count--;
5355 }
5356 if (val != 0xb0) {
5357 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5358 return -3;
5359 }
5360
5361 /* Wait until PRS register shows 2 packets */
5362 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5363 if (val != 2)
5364 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5365
5366 /* Write 1 to parser credits for CFC search request */
5367 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5368
5369 /* Wait until PRS register shows 3 packets */
5370 msleep(10 * factor);
5371 /* Wait until NIG register shows 1 packet of size 0x10 */
5372 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5373 if (val != 3)
5374 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5375
5376 /* clear NIG EOP FIFO */
5377 for (i = 0; i < 11; i++)
5378 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5379 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5380 if (val != 1) {
5381 BNX2X_ERR("clear of NIG failed\n");
5382 return -4;
5383 }
5384
5385 /* Reset and init BRB, PRS, NIG */
5386 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5387 msleep(50);
5388 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5389 msleep(50);
5390 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
5391 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
5392#ifndef BCM_ISCSI
5393 /* set NIC mode */
5394 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5395#endif
5396
5397 /* Enable inputs of parser neighbor blocks */
5398 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5399 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5400 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3196a88a 5401 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5402
5403 DP(NETIF_MSG_HW, "done\n");
5404
5405 return 0; /* OK */
5406}
5407
5408static void enable_blocks_attention(struct bnx2x *bp)
5409{
5410 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5411 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5412 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5413 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5414 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5415 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5416 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5417 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5418 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5419/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5420/* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5421 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5422 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5423 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5424/* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5425/* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5426 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5427 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5428 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5429 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5430/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5431/* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5432 if (CHIP_REV_IS_FPGA(bp))
5433 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5434 else
5435 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
a2fbb9ea
ET
5436 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5437 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5438 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5439/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5440/* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5441 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5442 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5443/* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5444 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3,4 masked */
5445}
5446
34f80b04 5447
5448static void bnx2x_reset_common(struct bnx2x *bp)
5449{
5450 /* reset_common */
5451 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5452 0xd3ffff7f);
5453 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5454}
5455
34f80b04 5456static int bnx2x_init_common(struct bnx2x *bp)
a2fbb9ea 5457{
a2fbb9ea 5458 u32 val, i;
a2fbb9ea 5459
34f80b04 5460 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
a2fbb9ea 5461
81f75bbf 5462 bnx2x_reset_common(bp);
5463 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5464 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
a2fbb9ea 5465
5466 bnx2x_init_block(bp, MISC_COMMON_START, MISC_COMMON_END);
5467 if (CHIP_IS_E1H(bp))
5468 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
a2fbb9ea 5469
5470 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5471 msleep(30);
5472 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
a2fbb9ea 5473
5474 bnx2x_init_block(bp, PXP_COMMON_START, PXP_COMMON_END);
5475 if (CHIP_IS_E1(bp)) {
5476 /* enable HW interrupt from PXP on USDM overflow
5477 bit 16 on INT_MASK_0 */
5478 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5479 }
a2fbb9ea 5480
5481 bnx2x_init_block(bp, PXP2_COMMON_START, PXP2_COMMON_END);
5482 bnx2x_init_pxp(bp);
5483
5484#ifdef __BIG_ENDIAN
5485 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5486 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5487 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5488 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5489 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5490 /* make sure this value is 0 */
5491 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5492
5493/* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5494 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5495 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5496 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5497 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5498#endif
5499
34f80b04 5500 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
a2fbb9ea 5501#ifdef BCM_ISCSI
5502 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5503 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5504 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5505#endif
5506
5507 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5508 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
a2fbb9ea 5509
5510 /* let the HW do its magic ... */
5511 msleep(100);
5512 /* finish PXP init */
5513 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5514 if (val != 1) {
5515 BNX2X_ERR("PXP2 CFG failed\n");
5516 return -EBUSY;
5517 }
5518 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5519 if (val != 1) {
5520 BNX2X_ERR("PXP2 RD_INIT failed\n");
5521 return -EBUSY;
5522 }
a2fbb9ea 5523
5524 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5525 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
a2fbb9ea 5526
34f80b04 5527 bnx2x_init_block(bp, DMAE_COMMON_START, DMAE_COMMON_END);
a2fbb9ea 5528
5529 /* clean the DMAE memory */
5530 bp->dmae_ready = 1;
5531 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
a2fbb9ea 5532
5533 bnx2x_init_block(bp, TCM_COMMON_START, TCM_COMMON_END);
5534 bnx2x_init_block(bp, UCM_COMMON_START, UCM_COMMON_END);
5535 bnx2x_init_block(bp, CCM_COMMON_START, CCM_COMMON_END);
5536 bnx2x_init_block(bp, XCM_COMMON_START, XCM_COMMON_END);
a2fbb9ea 5537
5538 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5539 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5540 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5541 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5542
5543 bnx2x_init_block(bp, QM_COMMON_START, QM_COMMON_END);
5544 /* soft reset pulse */
5545 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5546 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5547
5548#ifdef BCM_ISCSI
34f80b04 5549 bnx2x_init_block(bp, TIMERS_COMMON_START, TIMERS_COMMON_END);
a2fbb9ea 5550#endif
a2fbb9ea 5551
5552 bnx2x_init_block(bp, DQ_COMMON_START, DQ_COMMON_END);
5553 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5554 if (!CHIP_REV_IS_SLOW(bp)) {
5555 /* enable hw interrupt from doorbell Q */
5556 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5557 }
a2fbb9ea 5558
34f80b04 5559 bnx2x_init_block(bp, BRB1_COMMON_START, BRB1_COMMON_END);
34f80b04 5560 bnx2x_init_block(bp, PRS_COMMON_START, PRS_COMMON_END);
26c8fa4d 5561 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5562 /* set NIC mode */
5563 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5564 if (CHIP_IS_E1H(bp))
5565 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
a2fbb9ea 5566
5567 bnx2x_init_block(bp, TSDM_COMMON_START, TSDM_COMMON_END);
5568 bnx2x_init_block(bp, CSDM_COMMON_START, CSDM_COMMON_END);
5569 bnx2x_init_block(bp, USDM_COMMON_START, USDM_COMMON_END);
5570 bnx2x_init_block(bp, XSDM_COMMON_START, XSDM_COMMON_END);
a2fbb9ea 5571
5572 if (CHIP_IS_E1H(bp)) {
5573 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5574 STORM_INTMEM_SIZE_E1H/2);
5575 bnx2x_init_fill(bp,
5576 TSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5577 0, STORM_INTMEM_SIZE_E1H/2);
5578 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5579 STORM_INTMEM_SIZE_E1H/2);
5580 bnx2x_init_fill(bp,
5581 CSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5582 0, STORM_INTMEM_SIZE_E1H/2);
5583 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5584 STORM_INTMEM_SIZE_E1H/2);
5585 bnx2x_init_fill(bp,
5586 XSTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5587 0, STORM_INTMEM_SIZE_E1H/2);
5588 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5589 STORM_INTMEM_SIZE_E1H/2);
5590 bnx2x_init_fill(bp,
5591 USTORM_INTMEM_ADDR + STORM_INTMEM_SIZE_E1H/2,
5592 0, STORM_INTMEM_SIZE_E1H/2);
5593 } else { /* E1 */
5594 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0,
5595 STORM_INTMEM_SIZE_E1);
5596 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0,
5597 STORM_INTMEM_SIZE_E1);
5598 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0,
5599 STORM_INTMEM_SIZE_E1);
5600 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0,
5601 STORM_INTMEM_SIZE_E1);
34f80b04 5602 }
a2fbb9ea 5603
5604 bnx2x_init_block(bp, TSEM_COMMON_START, TSEM_COMMON_END);
5605 bnx2x_init_block(bp, USEM_COMMON_START, USEM_COMMON_END);
5606 bnx2x_init_block(bp, CSEM_COMMON_START, CSEM_COMMON_END);
5607 bnx2x_init_block(bp, XSEM_COMMON_START, XSEM_COMMON_END);
a2fbb9ea 5608
5609 /* sync semi rtc */
5610 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5611 0x80000000);
5612 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5613 0x80000000);
a2fbb9ea 5614
5615 bnx2x_init_block(bp, UPB_COMMON_START, UPB_COMMON_END);
5616 bnx2x_init_block(bp, XPB_COMMON_START, XPB_COMMON_END);
5617 bnx2x_init_block(bp, PBF_COMMON_START, PBF_COMMON_END);
a2fbb9ea 5618
5619 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5620 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5621 REG_WR(bp, i, 0xc0cac01a);
5622 /* TODO: replace with something meaningful */
5623 }
8d9c5f34 5624 bnx2x_init_block(bp, SRCH_COMMON_START, SRCH_COMMON_END);
34f80b04 5625 REG_WR(bp, SRC_REG_SOFT_RST, 0);
a2fbb9ea 5626
5627 if (sizeof(union cdu_context) != 1024)
5628 /* we currently assume that a context is 1024 bytes */
5629 printk(KERN_ALERT PFX "please adjust the size of"
5630 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
a2fbb9ea 5631
5632 bnx2x_init_block(bp, CDU_COMMON_START, CDU_COMMON_END);
5633 val = (4 << 24) + (0 << 12) + 1024;
5634 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5635 if (CHIP_IS_E1(bp)) {
5636 /* !!! fix pxp client credit until excel update */
5637 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5638 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5639 }
a2fbb9ea 5640
5641 bnx2x_init_block(bp, CFC_COMMON_START, CFC_COMMON_END);
5642 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5643 /* enable context validation interrupt from CFC */
5644 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5645
5646 /* set the thresholds to prevent CFC/CDU race */
5647 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
a2fbb9ea 5648
5649 bnx2x_init_block(bp, HC_COMMON_START, HC_COMMON_END);
5650 bnx2x_init_block(bp, MISC_AEU_COMMON_START, MISC_AEU_COMMON_END);
a2fbb9ea 5651
5652 /* PXPCS COMMON comes here */
5653 /* Reset PCIE errors for debug */
5654 REG_WR(bp, 0x2814, 0xffffffff);
5655 REG_WR(bp, 0x3820, 0xffffffff);
a2fbb9ea 5656
5657 /* EMAC0 COMMON comes here */
5658 /* EMAC1 COMMON comes here */
5659 /* DBU COMMON comes here */
5660 /* DBG COMMON comes here */
5661
5662 bnx2x_init_block(bp, NIG_COMMON_START, NIG_COMMON_END);
5663 if (CHIP_IS_E1H(bp)) {
5664 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5665 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5666 }
5667
5668 if (CHIP_REV_IS_SLOW(bp))
5669 msleep(200);
5670
5671 /* finish CFC init */
5672 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5673 if (val != 1) {
5674 BNX2X_ERR("CFC LL_INIT failed\n");
5675 return -EBUSY;
5676 }
5677 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5678 if (val != 1) {
5679 BNX2X_ERR("CFC AC_INIT failed\n");
5680 return -EBUSY;
5681 }
5682 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5683 if (val != 1) {
5684 BNX2X_ERR("CFC CAM_INIT failed\n");
5685 return -EBUSY;
5686 }
5687 REG_WR(bp, CFC_REG_DEBUG0, 0);
f1410647 5688
5689 /* read NIG statistics
5690 to see if this is our first up since powerup */
5691 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5692 val = *bnx2x_sp(bp, wb_data[0]);
5693
5694 /* do internal memory self test */
5695 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5696 BNX2X_ERR("internal mem self test failed\n");
5697 return -EBUSY;
5698 }
5699
5700 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5701 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5702 /* Fan failure is indicated by SPIO 5 */
5703 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5704 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5705
5706 /* set to active low mode */
5707 val = REG_RD(bp, MISC_REG_SPIO_INT);
5708 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
f1410647 5709 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
34f80b04 5710 REG_WR(bp, MISC_REG_SPIO_INT, val);
f1410647 5711
5712 /* enable interrupt to signal the IGU */
5713 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5714 val |= (1 << MISC_REGISTERS_SPIO_5);
5715 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5716 break;
f1410647 5717
5718 default:
5719 break;
5720 }
f1410647 5721
5722 /* clear PXP2 attentions */
5723 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
a2fbb9ea 5724
34f80b04 5725 enable_blocks_attention(bp);
a2fbb9ea 5726
5727 if (!BP_NOMCP(bp)) {
5728 bnx2x_acquire_phy_lock(bp);
5729 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5730 bnx2x_release_phy_lock(bp);
5731 } else
5732 BNX2X_ERR("Bootcode is missing - cannot initialize link\n");
5733
5734 return 0;
5735}
a2fbb9ea 5736
5737static int bnx2x_init_port(struct bnx2x *bp)
5738{
5739 int port = BP_PORT(bp);
1c06328c 5740 u32 low, high;
34f80b04 5741 u32 val;
a2fbb9ea 5742
5743 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5744
5745 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5746
5747 /* Port PXP comes here */
5748 /* Port PXP2 comes here */
5749#ifdef BCM_ISCSI
5750 /* Port0 1
5751 * Port1 385 */
5752 i++;
5753 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5754 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5755 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5756 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5757
5758 /* Port0 2
5759 * Port1 386 */
5760 i++;
5761 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5762 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5763 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5764 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5765
5766 /* Port0 3
5767 * Port1 387 */
5768 i++;
5769 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5770 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5771 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5772 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5773#endif
34f80b04 5774 /* Port CMs come here */
5775 bnx2x_init_block(bp, (port ? XCM_PORT1_START : XCM_PORT0_START),
5776 (port ? XCM_PORT1_END : XCM_PORT0_END));
5777
5778 /* Port QM comes here */
5779#ifdef BCM_ISCSI
5780 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5781 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5782
5783 bnx2x_init_block(bp, func ? TIMERS_PORT1_START : TIMERS_PORT0_START,
5784 func ? TIMERS_PORT1_END : TIMERS_PORT0_END);
5785#endif
5786 /* Port DQ comes here */
5787
5788 bnx2x_init_block(bp, (port ? BRB1_PORT1_START : BRB1_PORT0_START),
5789 (port ? BRB1_PORT1_END : BRB1_PORT0_END));
5790 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5791 /* no pause for emulation and FPGA */
5792 low = 0;
5793 high = 513;
5794 } else {
5795 if (IS_E1HMF(bp))
5796 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5797 else if (bp->dev->mtu > 4096) {
5798 if (bp->flags & ONE_PORT_FLAG)
5799 low = 160;
5800 else {
5801 val = bp->dev->mtu;
5802 /* (24*1024 + val*4)/256 */
5803 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5804 }
5805 } else
5806 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5807 high = low + 56; /* 14*1024/256 */
5808 }
5809 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5810 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5811
5812
ad8d3948 5813 /* Port PRS comes here */
5814 /* Port TSDM comes here */
5815 /* Port CSDM comes here */
5816 /* Port USDM comes here */
5817 /* Port XSDM comes here */
5818 bnx2x_init_block(bp, port ? TSEM_PORT1_START : TSEM_PORT0_START,
5819 port ? TSEM_PORT1_END : TSEM_PORT0_END);
5820 bnx2x_init_block(bp, port ? USEM_PORT1_START : USEM_PORT0_START,
5821 port ? USEM_PORT1_END : USEM_PORT0_END);
5822 bnx2x_init_block(bp, port ? CSEM_PORT1_START : CSEM_PORT0_START,
5823 port ? CSEM_PORT1_END : CSEM_PORT0_END);
5824 bnx2x_init_block(bp, port ? XSEM_PORT1_START : XSEM_PORT0_START,
5825 port ? XSEM_PORT1_END : XSEM_PORT0_END);
a2fbb9ea 5826 /* Port UPB comes here */
5827 /* Port XPB comes here */
5828
5829 bnx2x_init_block(bp, port ? PBF_PORT1_START : PBF_PORT0_START,
5830 port ? PBF_PORT1_END : PBF_PORT0_END);
5831
5832 /* configure PBF to work without PAUSE mtu 9000 */
34f80b04 5833 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5834
5835 /* update threshold */
34f80b04 5836 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
a2fbb9ea 5837 /* update init credit */
34f80b04 5838 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5839
5840 /* probe changes */
34f80b04 5841 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
a2fbb9ea 5842 msleep(5);
34f80b04 5843 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5844
5845#ifdef BCM_ISCSI
5846 /* tell the searcher where the T2 table is */
5847 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5848
5849 wb_write[0] = U64_LO(bp->t2_mapping);
5850 wb_write[1] = U64_HI(bp->t2_mapping);
5851 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5852 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5853 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5854 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5855
5856 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5857 /* Port SRCH comes here */
5858#endif
5859 /* Port CDU comes here */
5860 /* Port CFC comes here */
5861
5862 if (CHIP_IS_E1(bp)) {
5863 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5864 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5865 }
5866 bnx2x_init_block(bp, port ? HC_PORT1_START : HC_PORT0_START,
5867 port ? HC_PORT1_END : HC_PORT0_END);
5868
5869 bnx2x_init_block(bp, port ? MISC_AEU_PORT1_START :
a2fbb9ea 5870 MISC_AEU_PORT0_START,
5871 port ? MISC_AEU_PORT1_END : MISC_AEU_PORT0_END);
5872 /* init aeu_mask_attn_func_0/1:
5873 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
5874 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
5875 * bits 4-7 are used for "per vn group attention" */
5876 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
5877 (IS_E1HMF(bp) ? 0xF7 : 0x7));
5878
5879 /* Port PXPCS comes here */
5880 /* Port EMAC0 comes here */
5881 /* Port EMAC1 comes here */
5882 /* Port DBU comes here */
5883 /* Port DBG comes here */
5884 bnx2x_init_block(bp, port ? NIG_PORT1_START : NIG_PORT0_START,
5885 port ? NIG_PORT1_END : NIG_PORT0_END);
5886
5887 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
5888
5889 if (CHIP_IS_E1H(bp)) {
5890 /* 0x2 disable e1hov, 0x1 enable */
5891 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
5892 (IS_E1HMF(bp) ? 0x1 : 0x2));
5893
5894 /* support pause requests from USDM, TSDM and BRB */
5895 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
5896
5897 {
5898 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
5899 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
5900 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
5901 }
5902 }
5903
5904 /* Port MCP comes here */
5905 /* Port DMAE comes here */
5906
5907 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5908 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
5909 /* add SPIO 5 to group 0 */
5910 val = REG_RD(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
5911 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
5912 REG_WR(bp, MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0, val);
5913 break;
5914
5915 default:
5916 break;
5917 }
5918
c18487ee 5919 bnx2x__link_reset(bp);
a2fbb9ea 5920
5921 return 0;
5922}
5923
5924#define ILT_PER_FUNC (768/2)
5925#define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
5926/* the phys address is shifted right 12 bits and has a valid bit
5927 (1) added as the 53rd bit;
5928 then, since this is a wide register(TM),
5929 we split it into two 32-bit writes
5930 */
5931#define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
5932#define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
5933#define PXP_ONE_ILT(x) (((x) << 10) | x)
5934#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
5935
5936#define CNIC_ILT_LINES 0
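/* Worked example of the ONCHIP_ADDR encoding (illustrative): for a
 * DMA address of 0x12345678000, ONCHIP_ADDR1 yields the low 32 bits
 * of (addr >> 12), i.e. 0x12345678, and ONCHIP_ADDR2 yields
 * (1 << 20) | (addr >> 44) = 0x100000 - bit 20 of the high dword
 * being exactly the "valid" 53rd bit described above.
 */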
5937
5938static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
5939{
5940 int reg;
5941
5942 if (CHIP_IS_E1H(bp))
5943 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
5944 else /* E1 */
5945 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
5946
5947 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
5948}
5949
5950static int bnx2x_init_func(struct bnx2x *bp)
5951{
5952 int port = BP_PORT(bp);
5953 int func = BP_FUNC(bp);
8badd27a 5954 u32 addr, val;
5955 int i;
5956
5957 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
5958
5959 /* set MSI reconfigure capability */
5960 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
5961 val = REG_RD(bp, addr);
5962 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
5963 REG_WR(bp, addr, val);
5964
5965 i = FUNC_ILT_BASE(func);
5966
5967 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
5968 if (CHIP_IS_E1H(bp)) {
5969 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
5970 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
5971 } else /* E1 */
5972 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
5973 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
5974
5975
5976 if (CHIP_IS_E1H(bp)) {
5977 for (i = 0; i < 9; i++)
5978 bnx2x_init_block(bp,
5979 cm_start[func][i], cm_end[func][i]);
5980
5981 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
5982 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
5983 }
5984
5985 /* HC init per function */
5986 if (CHIP_IS_E1H(bp)) {
5987 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
5988
5989 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5990 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5991 }
5992 bnx2x_init_block(bp, hc_limits[func][0], hc_limits[func][1]);
5993
c14423fe 5994 /* Reset PCIE errors for debug */
5995 REG_WR(bp, 0x2114, 0xffffffff);
5996 REG_WR(bp, 0x2120, 0xffffffff);
a2fbb9ea 5997
5998 return 0;
5999}
6000
6001static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6002{
6003 int i, rc = 0;
a2fbb9ea 6004
6005 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6006 BP_FUNC(bp), load_code);
a2fbb9ea 6007
6008 bp->dmae_ready = 0;
6009 mutex_init(&bp->dmae_mutex);
6010 bnx2x_gunzip_init(bp);
a2fbb9ea 6011
6012 switch (load_code) {
6013 case FW_MSG_CODE_DRV_LOAD_COMMON:
6014 rc = bnx2x_init_common(bp);
6015 if (rc)
6016 goto init_hw_err;
6017 /* no break */
6018
6019 case FW_MSG_CODE_DRV_LOAD_PORT:
6020 bp->dmae_ready = 1;
6021 rc = bnx2x_init_port(bp);
6022 if (rc)
6023 goto init_hw_err;
6024 /* no break */
6025
6026 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6027 bp->dmae_ready = 1;
6028 rc = bnx2x_init_func(bp);
6029 if (rc)
6030 goto init_hw_err;
6031 break;
6032
6033 default:
6034 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6035 break;
6036 }
6037
6038 if (!BP_NOMCP(bp)) {
6039 int func = BP_FUNC(bp);
6040
6041 bp->fw_drv_pulse_wr_seq =
34f80b04 6042 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
a2fbb9ea 6043 DRV_PULSE_SEQ_MASK);
6044 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6045 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6046 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6047 } else
6048 bp->func_stx = 0;
a2fbb9ea 6049
6050 /* this needs to be done before gunzip end */
6051 bnx2x_zero_def_sb(bp);
6052 for_each_queue(bp, i)
6053 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6054
6055init_hw_err:
6056 bnx2x_gunzip_end(bp);
6057
6058 return rc;
6059}
6060
c14423fe 6061/* send the MCP a request, block until there is a reply */
6062static u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6063{
34f80b04 6064 int func = BP_FUNC(bp);
6065 u32 seq = ++bp->fw_seq;
6066 u32 rc = 0;
6067 u32 cnt = 1;
6068 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
a2fbb9ea 6069
34f80b04 6070 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
f1410647 6071 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
a2fbb9ea 6072
6073 do {
6074 /* let the FW do its magic ... */
6075 msleep(delay);
a2fbb9ea 6076
19680c48 6077 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
a2fbb9ea 6078
6079 /* Give the FW up to 2 seconds (200*10ms) */
6080 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6081
6082 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6083 cnt*delay, rc, seq);
6084
6085 /* is this a reply to our command? */
6086 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6087 rc &= FW_MSG_CODE_MASK;
f1410647 6088
6089 } else {
6090 /* FW BUG! */
6091 BNX2X_ERR("FW failed to respond!\n");
6092 bnx2x_fw_dump(bp);
6093 rc = 0;
6094 }
f1410647 6095
6096 return rc;
6097}
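/* Handshake summary for the mailbox above: the driver posts
 * (command | seq) into its func_mb drv_mb_header and then polls
 * fw_mb_header until the firmware echoes the same sequence number in
 * the low bits; the upper bits (FW_MSG_CODE_MASK) carry the response
 * code.  A return value of 0 therefore means the MCP never answered
 * within the ~2s polling budget.
 */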
6098
6099static void bnx2x_free_mem(struct bnx2x *bp)
6100{
6101
6102#define BNX2X_PCI_FREE(x, y, size) \
6103 do { \
6104 if (x) { \
6105 pci_free_consistent(bp->pdev, size, x, y); \
6106 x = NULL; \
6107 y = 0; \
6108 } \
6109 } while (0)
6110
6111#define BNX2X_FREE(x) \
6112 do { \
6113 if (x) { \
6114 vfree(x); \
6115 x = NULL; \
6116 } \
6117 } while (0)
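/* Both helpers use the classic do { ... } while (0) wrapper so that
 * an invocation behaves as a single statement, e.g. it expands safely
 * in an unbraced branch:
 *
 *	if (need_cleanup)
 *		BNX2X_FREE(fp->rx_buf_ring);
 *	else
 *		keep_rings(fp);
 *
 * (need_cleanup/keep_rings are hypothetical names for illustration.)
 */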
6118
6119 int i;
6120
6121 /* fastpath */
555f6c78 6122 /* Common */
6123 for_each_queue(bp, i) {
6124
555f6c78 6125 /* status blocks */
6126 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6127 bnx2x_fp(bp, i, status_blk_mapping),
6128 sizeof(struct host_status_block) +
6129 sizeof(struct eth_tx_db_data));
6130 }
6131 /* Rx */
6132 for_each_rx_queue(bp, i) {
a2fbb9ea 6133
555f6c78 6134 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6135 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6136 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6137 bnx2x_fp(bp, i, rx_desc_mapping),
6138 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6139
6140 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6141 bnx2x_fp(bp, i, rx_comp_mapping),
6142 sizeof(struct eth_fast_path_rx_cqe) *
6143 NUM_RCQ_BD);
a2fbb9ea 6144
7a9b2557 6145 /* SGE ring */
32626230 6146 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6147 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6148 bnx2x_fp(bp, i, rx_sge_mapping),
6149 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6150 }
6151 /* Tx */
6152 for_each_tx_queue(bp, i) {
6153
6154 /* fastpath tx rings: tx_buf tx_desc */
6155 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6156 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6157 bnx2x_fp(bp, i, tx_desc_mapping),
6158 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6159 }
6160 /* end of fastpath */
6161
6162 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
34f80b04 6163 sizeof(struct host_def_status_block));
6164
6165 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
34f80b04 6166 sizeof(struct bnx2x_slowpath));
6167
6168#ifdef BCM_ISCSI
6169 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6170 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6171 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6172 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6173#endif
7a9b2557 6174 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6175
6176#undef BNX2X_PCI_FREE
6177#undef BNX2X_FREE
6178}
6179
6180static int bnx2x_alloc_mem(struct bnx2x *bp)
6181{
6182
6183#define BNX2X_PCI_ALLOC(x, y, size) \
6184 do { \
6185 x = pci_alloc_consistent(bp->pdev, size, y); \
6186 if (x == NULL) \
6187 goto alloc_mem_err; \
6188 memset(x, 0, size); \
6189 } while (0)
6190
6191#define BNX2X_ALLOC(x, size) \
6192 do { \
6193 x = vmalloc(size); \
6194 if (x == NULL) \
6195 goto alloc_mem_err; \
6196 memset(x, 0, size); \
6197 } while (0)
6198
6199 int i;
6200
6201 /* fastpath */
555f6c78 6202 /* Common */
6203 for_each_queue(bp, i) {
6204 bnx2x_fp(bp, i, bp) = bp;
6205
555f6c78 6206 /* status blocks */
6207 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6208 &bnx2x_fp(bp, i, status_blk_mapping),
6209 sizeof(struct host_status_block) +
6210 sizeof(struct eth_tx_db_data));
6211 }
6212 /* Rx */
6213 for_each_rx_queue(bp, i) {
a2fbb9ea 6214
555f6c78 6215 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6216 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6217 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6218 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6219 &bnx2x_fp(bp, i, rx_desc_mapping),
6220 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6221
6222 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6223 &bnx2x_fp(bp, i, rx_comp_mapping),
6224 sizeof(struct eth_fast_path_rx_cqe) *
6225 NUM_RCQ_BD);
6226
6227 /* SGE ring */
6228 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6229 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6230 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6231 &bnx2x_fp(bp, i, rx_sge_mapping),
6232 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
a2fbb9ea 6233 }
6234 /* Tx */
6235 for_each_tx_queue(bp, i) {
6236
6237 bnx2x_fp(bp, i, hw_tx_prods) =
6238 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6239
6240 bnx2x_fp(bp, i, tx_prods_mapping) =
6241 bnx2x_fp(bp, i, status_blk_mapping) +
6242 sizeof(struct host_status_block);
6243
6244 /* fastpath tx rings: tx_buf tx_desc */
6245 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6246 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6247 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6248 &bnx2x_fp(bp, i, tx_desc_mapping),
6249 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6250 }
6251 /* end of fastpath */
6252
6253 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6254 sizeof(struct host_def_status_block));
6255
6256 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6257 sizeof(struct bnx2x_slowpath));
6258
6259#ifdef BCM_ISCSI
6260 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6261
6262 /* Initialize T1 */
6263 for (i = 0; i < 64*1024; i += 64) {
6264 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6265 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6266 }
6267
6268 /* allocate searcher T2 table
6269 we allocate 1/4 of alloc num for T2
6270 (which is not entered into the ILT) */
6271 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6272
6273 /* Initialize T2 */
6274 for (i = 0; i < 16*1024; i += 64)
6275 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6276
c14423fe 6277 /* now fixup the last line in the block to point to the next block */
6278 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6279
6280 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6281 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6282
6283 /* QM queues (128*MAX_CONN) */
6284 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6285#endif
6286
6287 /* Slow path ring */
6288 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6289
6290 return 0;
6291
6292alloc_mem_err:
6293 bnx2x_free_mem(bp);
6294 return -ENOMEM;
6295
6296#undef BNX2X_PCI_ALLOC
6297#undef BNX2X_ALLOC
6298}
6299
6300static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6301{
6302 int i;
6303
555f6c78 6304 for_each_tx_queue(bp, i) {
6305 struct bnx2x_fastpath *fp = &bp->fp[i];
6306
6307 u16 bd_cons = fp->tx_bd_cons;
6308 u16 sw_prod = fp->tx_pkt_prod;
6309 u16 sw_cons = fp->tx_pkt_cons;
6310
6311 while (sw_cons != sw_prod) {
6312 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6313 sw_cons++;
6314 }
6315 }
6316}
6317
6318static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6319{
6320 int i, j;
6321
555f6c78 6322 for_each_rx_queue(bp, j) {
6323 struct bnx2x_fastpath *fp = &bp->fp[j];
6324
6325 for (i = 0; i < NUM_RX_BD; i++) {
6326 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6327 struct sk_buff *skb = rx_buf->skb;
6328
6329 if (skb == NULL)
6330 continue;
6331
6332 pci_unmap_single(bp->pdev,
6333 pci_unmap_addr(rx_buf, mapping),
437cf2f1 6334 bp->rx_buf_size,
a2fbb9ea
ET
6335 PCI_DMA_FROMDEVICE);
6336
6337 rx_buf->skb = NULL;
6338 dev_kfree_skb(skb);
6339 }
7a9b2557 6340 if (!fp->disable_tpa)
6341 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6342 ETH_MAX_AGGREGATION_QUEUES_E1 :
7a9b2557 6343 ETH_MAX_AGGREGATION_QUEUES_E1H);
6344 }
6345}
6346
6347static void bnx2x_free_skbs(struct bnx2x *bp)
6348{
6349 bnx2x_free_tx_skbs(bp);
6350 bnx2x_free_rx_skbs(bp);
6351}
6352
6353static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6354{
34f80b04 6355 int i, offset = 1;
6356
6357 free_irq(bp->msix_table[0].vector, bp->dev);
c14423fe 6358 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6359 bp->msix_table[0].vector);
6360
6361 for_each_queue(bp, i) {
c14423fe 6362 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
34f80b04 6363 "state %x\n", i, bp->msix_table[i + offset].vector,
6364 bnx2x_fp(bp, i, state));
6365
34f80b04 6366 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
a2fbb9ea 6367 }
6368}
6369
6370static void bnx2x_free_irq(struct bnx2x *bp)
6371{
a2fbb9ea 6372 if (bp->flags & USING_MSIX_FLAG) {
6373 bnx2x_free_msix_irqs(bp);
6374 pci_disable_msix(bp->pdev);
6375 bp->flags &= ~USING_MSIX_FLAG;
6376
6377 } else if (bp->flags & USING_MSI_FLAG) {
6378 free_irq(bp->pdev->irq, bp->dev);
6379 pci_disable_msi(bp->pdev);
6380 bp->flags &= ~USING_MSI_FLAG;
6381
6382 } else
6383 free_irq(bp->pdev->irq, bp->dev);
6384}
6385
6386static int bnx2x_enable_msix(struct bnx2x *bp)
6387{
6388 int i, rc, offset = 1;
6389 int igu_vec = 0;
a2fbb9ea 6390
6391 bp->msix_table[0].entry = igu_vec;
6392 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
a2fbb9ea 6393
34f80b04 6394 for_each_queue(bp, i) {
8badd27a 6395 igu_vec = BP_L_ID(bp) + offset + i;
6396 bp->msix_table[i + offset].entry = igu_vec;
6397 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6398 "(fastpath #%u)\n", i + offset, igu_vec, i);
6399 }
6400
34f80b04 6401 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
555f6c78 6402 BNX2X_NUM_QUEUES(bp) + offset);
34f80b04 6403 if (rc) {
6404 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6405 return rc;
34f80b04 6406 }
8badd27a 6407
6408 bp->flags |= USING_MSIX_FLAG;
6409
6410 return 0;
6411}
6412
6413static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6414{
34f80b04 6415 int i, rc, offset = 1;
a2fbb9ea 6416
6417 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6418 bp->dev->name, bp->dev);
6419 if (rc) {
6420 BNX2X_ERR("request sp irq failed\n");
6421 return -EBUSY;
6422 }
6423
6424 for_each_queue(bp, i) {
6425 struct bnx2x_fastpath *fp = &bp->fp[i];
6426
6427 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
34f80b04 6428 rc = request_irq(bp->msix_table[i + offset].vector,
555f6c78 6429 bnx2x_msix_fp_int, 0, fp->name, fp);
a2fbb9ea 6430 if (rc) {
555f6c78 6431 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6432 bnx2x_free_msix_irqs(bp);
6433 return -EBUSY;
6434 }
6435
555f6c78 6436 fp->state = BNX2X_FP_STATE_IRQ;
6437 }
6438
6439 i = BNX2X_NUM_QUEUES(bp);
6440 if (is_multi(bp))
6441 printk(KERN_INFO PFX
6442 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6443 bp->dev->name, bp->msix_table[0].vector,
6444 bp->msix_table[offset].vector,
6445 bp->msix_table[offset + i - 1].vector);
6446 else
6447 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6448 bp->dev->name, bp->msix_table[0].vector,
6449 bp->msix_table[offset + i - 1].vector);
6450
a2fbb9ea 6451 return 0;
6452}
6453
6454static int bnx2x_enable_msi(struct bnx2x *bp)
6455{
6456 int rc;
6457
6458 rc = pci_enable_msi(bp->pdev);
6459 if (rc) {
6460 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6461 return -1;
6462 }
6463 bp->flags |= USING_MSI_FLAG;
6464
6465 return 0;
6466}
6467
6468static int bnx2x_req_irq(struct bnx2x *bp)
6469{
8badd27a 6470 unsigned long flags;
34f80b04 6471 int rc;
a2fbb9ea 6472
6473 if (bp->flags & USING_MSI_FLAG)
6474 flags = 0;
6475 else
6476 flags = IRQF_SHARED;
6477
6478 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
34f80b04 6479 bp->dev->name, bp->dev);
6480 if (!rc)
6481 bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6482
6483 return rc;
6484}
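/* MSI vectors are never shared between devices, so no IRQF_SHARED is
 * needed in that mode; a legacy INTx line may be shared with other
 * PCI devices, hence the flag.  Either way the single vector is
 * serviced by the same bnx2x_interrupt handler.
 */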
6485
6486static void bnx2x_napi_enable(struct bnx2x *bp)
6487{
6488 int i;
6489
555f6c78 6490 for_each_rx_queue(bp, i)
6491 napi_enable(&bnx2x_fp(bp, i, napi));
6492}
6493
6494static void bnx2x_napi_disable(struct bnx2x *bp)
6495{
6496 int i;
6497
555f6c78 6498 for_each_rx_queue(bp, i)
6499 napi_disable(&bnx2x_fp(bp, i, napi));
6500}
6501
6502static void bnx2x_netif_start(struct bnx2x *bp)
6503{
6504 if (atomic_dec_and_test(&bp->intr_sem)) {
6505 if (netif_running(bp->dev)) {
6506 bnx2x_napi_enable(bp);
6507 bnx2x_int_enable(bp);
6508 if (bp->state == BNX2X_STATE_OPEN)
6509 netif_tx_wake_all_queues(bp->dev);
6510 }
6511 }
6512}
6513
f8ef6e44 6514static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
65abd74d 6515{
f8ef6e44 6516 bnx2x_int_disable_sync(bp, disable_hw);
e94d8af3 6517 bnx2x_napi_disable(bp);
65abd74d 6518 if (netif_running(bp->dev)) {
6519 netif_tx_disable(bp->dev);
6520 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6521 }
6522}
6523
6524/*
6525 * Init service functions
6526 */
6527
3101c2bc 6528static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6529{
6530 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
34f80b04 6531 int port = BP_PORT(bp);
6532
6533 /* CAM allocation
6534 * unicasts 0-31:port0 32-63:port1
6535 * multicast 64-127:port0 128-191:port1
6536 */
8d9c5f34 6537 config->hdr.length = 2;
af246401 6538 config->hdr.offset = port ? 32 : 0;
34f80b04 6539 config->hdr.client_id = BP_CL_ID(bp);
a2fbb9ea
ET
6540 config->hdr.reserved1 = 0;
6541
6542 /* primary MAC */
6543 config->config_table[0].cam_entry.msb_mac_addr =
6544 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6545 config->config_table[0].cam_entry.middle_mac_addr =
6546 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6547 config->config_table[0].cam_entry.lsb_mac_addr =
6548 swab16(*(u16 *)&bp->dev->dev_addr[4]);
34f80b04 6549 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6550 if (set)
6551 config->config_table[0].target_table_entry.flags = 0;
6552 else
6553 CAM_INVALIDATE(config->config_table[0]);
6554 config->config_table[0].target_table_entry.client_id = 0;
6555 config->config_table[0].target_table_entry.vlan_id = 0;
6556
6557 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6558 (set ? "setting" : "clearing"),
a2fbb9ea
ET
6559 config->config_table[0].cam_entry.msb_mac_addr,
6560 config->config_table[0].cam_entry.middle_mac_addr,
6561 config->config_table[0].cam_entry.lsb_mac_addr);
6562
6563 /* broadcast */
6564 config->config_table[1].cam_entry.msb_mac_addr = 0xffff;
6565 config->config_table[1].cam_entry.middle_mac_addr = 0xffff;
6566 config->config_table[1].cam_entry.lsb_mac_addr = 0xffff;
34f80b04 6567 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6568 if (set)
6569 config->config_table[1].target_table_entry.flags =
a2fbb9ea 6570 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6571 else
6572 CAM_INVALIDATE(config->config_table[1]);
6573 config->config_table[1].target_table_entry.client_id = 0;
6574 config->config_table[1].target_table_entry.vlan_id = 0;
6575
6576 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6577 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6578 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6579}
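/* On the swab16() packing above: the CAM expects the MAC as three
 * big-endian 16-bit words, so on a little-endian host each u16-sized
 * chunk of dev_addr is byte-swapped into place.  For example (a
 * sketch), MAC 00:11:22:33:44:55 becomes msb 0x0011, middle 0x2233
 * and lsb 0x4455.
 */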
6580
3101c2bc 6581static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6582{
6583 struct mac_configuration_cmd_e1h *config =
6584 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6585
3101c2bc 6586 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6587 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6588 return;
6589 }
6590
6591 /* CAM allocation for E1H
6592 * unicasts: by func number
6593 * multicast: 20+FUNC*20, 20 each
6594 */
8d9c5f34 6595 config->hdr.length = 1;
6596 config->hdr.offset = BP_FUNC(bp);
6597 config->hdr.client_id = BP_CL_ID(bp);
6598 config->hdr.reserved1 = 0;
6599
6600 /* primary MAC */
6601 config->config_table[0].msb_mac_addr =
6602 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6603 config->config_table[0].middle_mac_addr =
6604 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6605 config->config_table[0].lsb_mac_addr =
6606 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6607 config->config_table[0].client_id = BP_L_ID(bp);
6608 config->config_table[0].vlan_id = 0;
6609 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6610 if (set)
6611 config->config_table[0].flags = BP_PORT(bp);
6612 else
6613 config->config_table[0].flags =
6614 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
34f80b04 6615
6616 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6617 (set ? "setting" : "clearing"),
6618 config->config_table[0].msb_mac_addr,
6619 config->config_table[0].middle_mac_addr,
6620 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6621
6622 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6623 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6624 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6625}
6626
6627static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6628 int *state_p, int poll)
6629{
6630 /* can take a while if any port is running */
34f80b04 6631 int cnt = 500;
a2fbb9ea 6632
6633 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6634 poll ? "polling" : "waiting", state, idx);
6635
6636 might_sleep();
34f80b04 6637 while (cnt--) {
6638 if (poll) {
6639 bnx2x_rx_int(bp->fp, 10);
6640 /* if index is different from 0
6641 * the reply for some commands will
3101c2bc 6642 * be on the non default queue
6643 */
6644 if (idx)
6645 bnx2x_rx_int(&bp->fp[idx], 10);
6646 }
a2fbb9ea 6647
3101c2bc 6648 mb(); /* state is changed by bnx2x_sp_event() */
49d66772 6649 if (*state_p == state)
6650 return 0;
6651
a2fbb9ea 6652 msleep(1);
6653 }
6654
a2fbb9ea 6655 /* timeout! */
6656 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6657 poll ? "polling" : "waiting", state, idx);
6658#ifdef BNX2X_STOP_ON_ERROR
6659 bnx2x_panic();
6660#endif
a2fbb9ea 6661
49d66772 6662 return -EBUSY;
6663}
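/* Usage note: poll != 0 is only passed before interrupts are live
 * (e.g. loading in polling mode); the loop then drains the rx
 * completion ring by hand so the ramrod completion that updates
 * *state_p can be processed at all.  Otherwise bnx2x_sp_event() does
 * that from interrupt context and this loop merely sleeps and
 * re-checks.
 */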
6664
6665static int bnx2x_setup_leading(struct bnx2x *bp)
6666{
34f80b04 6667 int rc;
a2fbb9ea 6668
c14423fe 6669 /* reset IGU state */
34f80b04 6670 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6671
6672 /* SETUP ramrod */
6673 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6674
34f80b04
EG
6675 /* Wait for completion */
6676 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
a2fbb9ea 6677
34f80b04 6678 return rc;
a2fbb9ea
ET
6679}
6680
static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}

static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode(struct bnx2x *bp)
{
	int num_queues;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP,
		   "set number of queues to %d\n", num_queues);
		break;

	case INT_MODE_MSIX:
	default:
		if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
			num_queues = min_t(u32, num_online_cpus(),
					   BNX2X_MAX_QUEUES(bp));
		else
			num_queues = 1;
		bp->num_rx_queues = num_queues;
		bp->num_tx_queues = num_queues;
		DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
		   "  number of tx queues to %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);
		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		if (bnx2x_enable_msix(bp)) {
			/* failed to enable MSI-X */
			num_queues = 1;
			bp->num_rx_queues = num_queues;
			bp->num_tx_queues = num_queues;
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X  set number of "
					  "queues to %d\n", num_queues);
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
}

static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc = 0;
#ifdef BNX2X_STOP_ON_ERROR
	DP(NETIF_MSG_IFUP, "enter  load_mode %d\n", load_mode);
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
			((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

#ifdef BNX2X_STOP_ON_ERROR
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		fp->poll_no_work = 0;
		fp->poll_calls = 0;
		fp->poll_max_calls = 0;
		fp->poll_complete = 0;
		fp->poll_exit = 0;
	}
#endif
	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);
		bnx2x_ack_int(bp);
		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	*/
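	/* Without an MCP the driver keeps this bookkeeping itself (else
	 * branch below): load_count[0] counts all loaded functions and
	 * load_count[1 + port] those on this port, so the first load
	 * overall performs COMMON init, the first on a port PORT init,
	 * and any later load FUNCTION-only init (mirrored on unload).
	 */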
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP  load counts before us %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP  new load counts       %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			BNX2X_ERR("!!!  mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN)
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

	if (CHIP_IS_E1(bp))
		bnx2x_set_mac_addr_e1(bp, 1);
	else
		bnx2x_set_mac_addr_e1h(bp, 1);

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		/* Tx queue should be only reenabled */
		netif_tx_wake_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);


	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	/* TBD we really need to reset the chip
	   if we want to recover from this */
	return rc;
}

static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	u16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, BP_CL_ID(bp), 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;
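	/* Snapshot the slowpath producer of the default status block;
	 * the PORT_DEL completion is expected to advance it, which is
	 * what the polling loop below watches for. */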

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#else
			rc = -EBUSY;
#endif
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}

static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}

/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		smp_rmb();
		while (bnx2x_has_tx_work_unload(fp)) {

			bnx2x_tx_int(fp, 1000);
			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
			smp_rmb();
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = BP_CL_ID(bp);
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG) {
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
		if (CHIP_IS_E1H(bp))
			REG_WR(bp, MISC_REG_E1HMF_MODE, 0);

	} else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;
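		/* Each MAC_MATCH entry appears to be 8 bytes wide (two
		 * 32-bit registers, note the "entry + 4" write below), so
		 * VN 0 lands at byte offset 8 (entry 1) and VN 3 at
		 * offset 32 (entry 4). */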

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP  load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP  new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	bp->port.pmf = 0;

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
	 KERN_ERR " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/* ethtool_ops */

/*
 * Init service functions
 */

static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}

static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			       (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					      NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			     (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					    MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}

static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
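	/* For example (hypothetical register values): chip num 0x164e,
	 * rev 0x0, metal 0x00 and bond id 0x0 assemble to a chip_id of
	 * 0x164e0000. */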
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);

	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else {
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;
	}
	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "Not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}

static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);

		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}

static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}

static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.serdes_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].serdes_config);
	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	BNX2X_DEV_INFO("serdes_config 0x%08x  lane_config 0x%08x\n"
	     KERN_INFO "  ext_phy_config 0x%08x  speed_cap_mask 0x%08x"
		       "  link_config 0x%08x\n",
		       bp->link_params.serdes_config,
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg = (bp->port.link_config &
				      PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
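	/* Assumed shmem layout: mac_upper carries MAC bytes 0-1 in its low
	 * 16 bits and mac_lower bytes 2-5, most significant byte first,
	 * which is exactly how the shifts below unpack them. */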
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}

static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {

			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("Single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  " aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}

static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		      "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;


	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}


	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	bp->rx_csum = 1;

	bp->tx_ticks = 50;
	bp->rx_ticks = 25;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);
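	/* The poll module parameter, when non-zero, overrides the default
	 * period of HZ jiffies (one second; five on slow emulation
	 * platforms). */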

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}

/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
			       FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
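		/* MAX_BW is evidently configured in units of 100 Mbps
		 * (note the * 100), so in multi-function mode the reported
		 * speed is capped at the per-VN bandwidth limit. */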
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}

static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

#define PHY_FW_VER_LEN			10

static void bnx2x_get_drvinfo(struct net_device *dev,
			      struct ethtool_drvinfo *info)
{
	struct bnx2x *bp = netdev_priv(dev);
	u8 phy_fw_ver[PHY_FW_VER_LEN];

	strcpy(info->driver, DRV_MODULE_NAME);
	strcpy(info->version, DRV_MODULE_VERSION);

	phy_fw_ver[0] = '\0';
	if (bp->port.pmf) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_get_ext_phy_fw_version(&bp->link_params,
					     (bp->state != BNX2X_STATE_CLOSED),
					     phy_fw_ver, PHY_FW_VER_LEN);
		bnx2x_release_phy_lock(bp);
	}

	snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
		 (bp->common.bc_ver & 0xff0000) >> 16,
		 (bp->common.bc_ver & 0xff00) >> 8,
		 (bp->common.bc_ver & 0xff),
		 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
	strcpy(info->bus_info, pci_name(bp->pdev));
	info->n_stats = BNX2X_NUM_STATS;
	info->testinfo_len = BNX2X_NUM_TESTS;
	info->eedump_len = bp->common.flash_size;
	info->regdump_len = 0;
}

static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (bp->flags & NO_WOL_FLAG) {
		wol->supported = 0;
		wol->wolopts = 0;
	} else {
		wol->supported = WAKE_MAGIC;
		if (bp->wol)
			wol->wolopts = WAKE_MAGIC;
		else
			wol->wolopts = 0;
	}
	memset(&wol->sopass, 0, sizeof(wol->sopass));
}

static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (wol->wolopts & ~WAKE_MAGIC)
		return -EINVAL;

	if (wol->wolopts & WAKE_MAGIC) {
		if (bp->flags & NO_WOL_FLAG)
			return -EINVAL;

		bp->wol = 1;
	} else
		bp->wol = 0;

	return 0;
}

static u32 bnx2x_get_msglevel(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	return bp->msglevel;
}

static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (capable(CAP_NET_ADMIN))
		bp->msglevel = level;
}

static int bnx2x_nway_reset(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	if (!bp->port.pmf)
		return 0;

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}

8418static int bnx2x_get_eeprom_len(struct net_device *dev)
8419{
8420 struct bnx2x *bp = netdev_priv(dev);
8421
34f80b04 8422 return bp->common.flash_size;
8423}
8424
8425static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8426{
34f80b04 8427 int port = BP_PORT(bp);
8428 int count, i;
8429 u32 val = 0;
8430
8431 /* adjust timeout for emulation/FPGA */
8432 count = NVRAM_TIMEOUT_COUNT;
8433 if (CHIP_REV_IS_SLOW(bp))
8434 count *= 100;
8435
8436 /* request access to nvram interface */
8437 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8438 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8439
8440 for (i = 0; i < count*10; i++) {
8441 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8442 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8443 break;
8444
8445 udelay(5);
8446 }
8447
8448 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
34f80b04 8449 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8450 return -EBUSY;
8451 }
8452
8453 return 0;
8454}
8455
8456static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8457{
34f80b04 8458 int port = BP_PORT(bp);
8459 int count, i;
8460 u32 val = 0;
8461
8462 /* adjust timeout for emulation/FPGA */
8463 count = NVRAM_TIMEOUT_COUNT;
8464 if (CHIP_REV_IS_SLOW(bp))
8465 count *= 100;
8466
8467 /* relinquish nvram interface */
8468 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8469 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8470
8471 for (i = 0; i < count*10; i++) {
8472 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8473 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8474 break;
8475
8476 udelay(5);
8477 }
8478
8479 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
34f80b04 8480 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8481 return -EBUSY;
8482 }
8483
8484 return 0;
8485}
8486
8487static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8488{
8489 u32 val;
8490
8491 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8492
8493 /* enable both bits, even on read */
8494 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8495 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8496 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8497}
8498
8499static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8500{
8501 u32 val;
8502
8503 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8504
8505 /* disable both bits, even after read */
8506 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8507 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8508 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8509}
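/* Editor's sketch, not part of the original file: the four helpers above
 * are always used as acquire -> enable -> op -> disable -> release.  A
 * hypothetical wrapper keeping that pairing explicit could look like
 * this; only the bnx2x_*_nvram_* calls are real, the wrapper is assumed.
 */
static int bnx2x_with_nvram_access(struct bnx2x *bp,
				   int (*op)(struct bnx2x *bp, void *arg),
				   void *arg)
{
	int rc = bnx2x_acquire_nvram_lock(bp);

	if (rc)
		return rc;

	bnx2x_enable_nvram_access(bp);
	rc = op(bp, arg);
	bnx2x_disable_nvram_access(bp);
	bnx2x_release_nvram_lock(bp);

	return rc;
}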
8510
8511static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, u32 *ret_val,
8512 u32 cmd_flags)
8513{
f1410647 8514 int count, i, rc;
8515 u32 val;
8516
8517 /* build the command word */
8518 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8519
8520 /* need to clear DONE bit separately */
8521 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8522
8523 /* address of the NVRAM to read from */
8524 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8525 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8526
8527 /* issue a read command */
8528 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8529
8530 /* adjust timeout for emulation/FPGA */
8531 count = NVRAM_TIMEOUT_COUNT;
8532 if (CHIP_REV_IS_SLOW(bp))
8533 count *= 100;
8534
8535 /* wait for completion */
8536 *ret_val = 0;
8537 rc = -EBUSY;
8538 for (i = 0; i < count; i++) {
8539 udelay(5);
8540 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8541
8542 if (val & MCPR_NVM_COMMAND_DONE) {
8543 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
 8544 /* we read NVRAM data in CPU order
 8545 * but ethtool expects an array of bytes;
 8546 * converting to big-endian does the job */
8547 val = cpu_to_be32(val);
8548 *ret_val = val;
8549 rc = 0;
8550 break;
8551 }
8552 }
8553
8554 return rc;
8555}
8556
8557static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8558 int buf_size)
8559{
8560 int rc;
8561 u32 cmd_flags;
8562 u32 val;
8563
8564 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8565 DP(BNX2X_MSG_NVM,
c14423fe 8566 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8567 offset, buf_size);
8568 return -EINVAL;
8569 }
8570
8571 if (offset + buf_size > bp->common.flash_size) {
8572 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8573 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8574 offset, buf_size, bp->common.flash_size);
8575 return -EINVAL;
8576 }
8577
8578 /* request access to nvram interface */
8579 rc = bnx2x_acquire_nvram_lock(bp);
8580 if (rc)
8581 return rc;
8582
8583 /* enable access to nvram interface */
8584 bnx2x_enable_nvram_access(bp);
8585
8586 /* read the first word(s) */
8587 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8588 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8589 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8590 memcpy(ret_buf, &val, 4);
8591
8592 /* advance to the next dword */
8593 offset += sizeof(u32);
8594 ret_buf += sizeof(u32);
8595 buf_size -= sizeof(u32);
8596 cmd_flags = 0;
8597 }
8598
8599 if (rc == 0) {
8600 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8601 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8602 memcpy(ret_buf, &val, 4);
8603 }
8604
8605 /* disable access to nvram interface */
8606 bnx2x_disable_nvram_access(bp);
8607 bnx2x_release_nvram_lock(bp);
8608
8609 return rc;
8610}
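/* Usage sketch (an editor's assumption, mirroring bnx2x_test_nvram()
 * further down): reads must be dword-aligned and dword-sized, and the
 * data comes back big-endian so it can be handed to ethtool as bytes.
 */
static int bnx2x_nvram_read_magic(struct bnx2x *bp, u32 *magic)
{
	__be32 buf;
	int rc = bnx2x_nvram_read(bp, 0, (u8 *)&buf, sizeof(buf));

	if (rc == 0)
		*magic = be32_to_cpu(buf);	/* expected 0x669955aa */

	return rc;
}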
8611
8612static int bnx2x_get_eeprom(struct net_device *dev,
8613 struct ethtool_eeprom *eeprom, u8 *eebuf)
8614{
8615 struct bnx2x *bp = netdev_priv(dev);
8616 int rc;
8617
8618 if (!netif_running(dev))
8619 return -EAGAIN;
8620
34f80b04 8621 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8622 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8623 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8624 eeprom->len, eeprom->len);
8625
8626 /* parameters already validated in ethtool_get_eeprom */
8627
8628 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8629
8630 return rc;
8631}
8632
8633static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8634 u32 cmd_flags)
8635{
f1410647 8636 int count, i, rc;
8637
8638 /* build the command word */
8639 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8640
8641 /* need to clear DONE bit separately */
8642 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8643
8644 /* write the data */
8645 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8646
8647 /* address of the NVRAM to write to */
8648 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8649 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8650
8651 /* issue the write command */
8652 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8653
8654 /* adjust timeout for emulation/FPGA */
8655 count = NVRAM_TIMEOUT_COUNT;
8656 if (CHIP_REV_IS_SLOW(bp))
8657 count *= 100;
8658
8659 /* wait for completion */
8660 rc = -EBUSY;
8661 for (i = 0; i < count; i++) {
8662 udelay(5);
8663 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8664 if (val & MCPR_NVM_COMMAND_DONE) {
8665 rc = 0;
8666 break;
8667 }
8668 }
8669
8670 return rc;
8671}
8672
f1410647 8673#define BYTE_OFFSET(offset) (8 * (offset & 0x03))
8674
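/* Worked example (editor's addition): for offset 0x102 the aligned dword
 * is 0x100 and BYTE_OFFSET(0x102) = 8 * 2 = 16, so bnx2x_nvram_write1()
 * below patches the single byte into bits 23:16 of the dword it read.
 */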
8675static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
8676 int buf_size)
8677{
8678 int rc;
8679 u32 cmd_flags;
8680 u32 align_offset;
8681 u32 val;
8682
8683 if (offset + buf_size > bp->common.flash_size) {
8684 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8685 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8686 offset, buf_size, bp->common.flash_size);
8687 return -EINVAL;
8688 }
8689
8690 /* request access to nvram interface */
8691 rc = bnx2x_acquire_nvram_lock(bp);
8692 if (rc)
8693 return rc;
8694
8695 /* enable access to nvram interface */
8696 bnx2x_enable_nvram_access(bp);
8697
8698 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
8699 align_offset = (offset & ~0x03);
8700 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
8701
8702 if (rc == 0) {
8703 val &= ~(0xff << BYTE_OFFSET(offset));
8704 val |= (*data_buf << BYTE_OFFSET(offset));
8705
 8706 /* NVRAM data is returned as an array of bytes;
 8707 * convert it back to CPU order */
8708 val = be32_to_cpu(val);
8709
8710 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
8711 cmd_flags);
8712 }
8713
8714 /* disable access to nvram interface */
8715 bnx2x_disable_nvram_access(bp);
8716 bnx2x_release_nvram_lock(bp);
8717
8718 return rc;
8719}
8720
8721static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
8722 int buf_size)
8723{
8724 int rc;
8725 u32 cmd_flags;
8726 u32 val;
8727 u32 written_so_far;
8728
34f80b04 8729 if (buf_size == 1) /* ethtool */
a2fbb9ea 8730 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
8731
8732 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
34f80b04 8733 DP(BNX2X_MSG_NVM,
c14423fe 8734 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8735 offset, buf_size);
8736 return -EINVAL;
8737 }
8738
8739 if (offset + buf_size > bp->common.flash_size) {
8740 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
a2fbb9ea 8741 " buf_size (0x%x) > flash_size (0x%x)\n",
34f80b04 8742 offset, buf_size, bp->common.flash_size);
8743 return -EINVAL;
8744 }
8745
8746 /* request access to nvram interface */
8747 rc = bnx2x_acquire_nvram_lock(bp);
8748 if (rc)
8749 return rc;
8750
8751 /* enable access to nvram interface */
8752 bnx2x_enable_nvram_access(bp);
8753
8754 written_so_far = 0;
8755 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8756 while ((written_so_far < buf_size) && (rc == 0)) {
8757 if (written_so_far == (buf_size - sizeof(u32)))
8758 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8759 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
8760 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8761 else if ((offset % NVRAM_PAGE_SIZE) == 0)
8762 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
8763
8764 memcpy(&val, data_buf, 4);
8765
8766 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
8767
8768 /* advance to the next dword */
8769 offset += sizeof(u32);
8770 data_buf += sizeof(u32);
8771 written_so_far += sizeof(u32);
8772 cmd_flags = 0;
8773 }
8774
8775 /* disable access to nvram interface */
8776 bnx2x_disable_nvram_access(bp);
8777 bnx2x_release_nvram_lock(bp);
8778
8779 return rc;
8780}
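/* Editor's restatement of the FIRST/LAST rule above as a stateless helper
 * (an assumption - the original carries cmd_flags across iterations, and
 * NVRAM_PAGE_SIZE is taken to be a power of two): a dword is FIRST when
 * it opens the buffer or a flash page and LAST when it closes either, so
 * a long buffer degenerates into per-page write bursts.
 */
static u32 bnx2x_nvram_burst_flags(u32 offset, u32 written, u32 buf_size)
{
	u32 cmd_flags = 0;

	if (written == 0 || (offset % NVRAM_PAGE_SIZE) == 0)
		cmd_flags |= MCPR_NVM_COMMAND_FIRST;
	if (written + sizeof(u32) == buf_size ||
	    ((offset + sizeof(u32)) % NVRAM_PAGE_SIZE) == 0)
		cmd_flags |= MCPR_NVM_COMMAND_LAST;

	return cmd_flags;
}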
8781
8782static int bnx2x_set_eeprom(struct net_device *dev,
8783 struct ethtool_eeprom *eeprom, u8 *eebuf)
8784{
8785 struct bnx2x *bp = netdev_priv(dev);
8786 int rc;
8787
8788 if (!netif_running(dev))
8789 return -EAGAIN;
8790
34f80b04 8791 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8792 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8793 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8794 eeprom->len, eeprom->len);
8795
8796 /* parameters already validated in ethtool_set_eeprom */
8797
c18487ee 8798 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
8799 if (eeprom->magic == 0x00504859)
8800 if (bp->port.pmf) {
8801
4a37fb66 8802 bnx2x_acquire_phy_lock(bp);
8803 rc = bnx2x_flash_download(bp, BP_PORT(bp),
8804 bp->link_params.ext_phy_config,
8805 (bp->state != BNX2X_STATE_CLOSED),
8806 eebuf, eeprom->len);
8807 if ((bp->state == BNX2X_STATE_OPEN) ||
8808 (bp->state == BNX2X_STATE_DISABLED)) {
8809 rc |= bnx2x_link_reset(&bp->link_params,
8810 &bp->link_vars);
8811 rc |= bnx2x_phy_init(&bp->link_params,
8812 &bp->link_vars);
bb2a0f7a 8813 }
4a37fb66 8814 bnx2x_release_phy_lock(bp);
8815
8816 } else /* Only the PMF can access the PHY */
8817 return -EINVAL;
8818 else
c18487ee 8819 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
8820
8821 return rc;
8822}
8823
8824static int bnx2x_get_coalesce(struct net_device *dev,
8825 struct ethtool_coalesce *coal)
8826{
8827 struct bnx2x *bp = netdev_priv(dev);
8828
8829 memset(coal, 0, sizeof(struct ethtool_coalesce));
8830
8831 coal->rx_coalesce_usecs = bp->rx_ticks;
8832 coal->tx_coalesce_usecs = bp->tx_ticks;
8833
8834 return 0;
8835}
8836
8837static int bnx2x_set_coalesce(struct net_device *dev,
8838 struct ethtool_coalesce *coal)
8839{
8840 struct bnx2x *bp = netdev_priv(dev);
8841
8842 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
8843 if (bp->rx_ticks > 3000)
8844 bp->rx_ticks = 3000;
8845
8846 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
8847 if (bp->tx_ticks > 0x3000)
8848 bp->tx_ticks = 0x3000;
8849
34f80b04 8850 if (netif_running(dev))
8851 bnx2x_update_coalesce(bp);
8852
8853 return 0;
8854}
8855
8856static void bnx2x_get_ringparam(struct net_device *dev,
8857 struct ethtool_ringparam *ering)
8858{
8859 struct bnx2x *bp = netdev_priv(dev);
8860
8861 ering->rx_max_pending = MAX_RX_AVAIL;
8862 ering->rx_mini_max_pending = 0;
8863 ering->rx_jumbo_max_pending = 0;
8864
8865 ering->rx_pending = bp->rx_ring_size;
8866 ering->rx_mini_pending = 0;
8867 ering->rx_jumbo_pending = 0;
8868
8869 ering->tx_max_pending = MAX_TX_AVAIL;
8870 ering->tx_pending = bp->tx_ring_size;
8871}
8872
8873static int bnx2x_set_ringparam(struct net_device *dev,
8874 struct ethtool_ringparam *ering)
8875{
8876 struct bnx2x *bp = netdev_priv(dev);
34f80b04 8877 int rc = 0;
8878
8879 if ((ering->rx_pending > MAX_RX_AVAIL) ||
8880 (ering->tx_pending > MAX_TX_AVAIL) ||
8881 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
8882 return -EINVAL;
8883
8884 bp->rx_ring_size = ering->rx_pending;
8885 bp->tx_ring_size = ering->tx_pending;
8886
8887 if (netif_running(dev)) {
8888 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8889 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8890 }
8891
34f80b04 8892 return rc;
8893}
8894
8895static void bnx2x_get_pauseparam(struct net_device *dev,
8896 struct ethtool_pauseparam *epause)
8897{
8898 struct bnx2x *bp = netdev_priv(dev);
8899
c0700f90 8900 epause->autoneg = (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8901 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
8902
8903 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
8904 BNX2X_FLOW_CTRL_RX);
8905 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
8906 BNX2X_FLOW_CTRL_TX);
8907
8908 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8909 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8910 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8911}
8912
8913static int bnx2x_set_pauseparam(struct net_device *dev,
8914 struct ethtool_pauseparam *epause)
8915{
8916 struct bnx2x *bp = netdev_priv(dev);
8917
8918 if (IS_E1HMF(bp))
8919 return 0;
8920
8921 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
8922 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
8923 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
8924
c0700f90 8925 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
a2fbb9ea 8926
f1410647 8927 if (epause->rx_pause)
c0700f90 8928 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
c18487ee 8929
f1410647 8930 if (epause->tx_pause)
c0700f90 8931 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
c18487ee 8932
8933 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
8934 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
a2fbb9ea 8935
c18487ee 8936 if (epause->autoneg) {
34f80b04 8937 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
3196a88a 8938 DP(NETIF_MSG_LINK, "autoneg not supported\n");
8939 return -EINVAL;
8940 }
a2fbb9ea 8941
c18487ee 8942 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
c0700f90 8943 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
c18487ee 8944 }
a2fbb9ea 8945
8946 DP(NETIF_MSG_LINK,
8947 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
8948
8949 if (netif_running(dev)) {
bb2a0f7a 8950 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8951 bnx2x_link_set(bp);
8952 }
8953
8954 return 0;
8955}
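/* Worked example (editor's addition): rx_pause=1/tx_pause=0 leaves
 * BNX2X_FLOW_CTRL_RX; rx_pause=0/tx_pause=0 collapses the initial AUTO
 * to NONE; and autoneg together with req_line_speed == SPEED_AUTO_NEG
 * restores AUTO so the link partner negotiation decides.
 */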
8956
8957static int bnx2x_set_flags(struct net_device *dev, u32 data)
8958{
8959 struct bnx2x *bp = netdev_priv(dev);
8960 int changed = 0;
8961 int rc = 0;
8962
8963 /* TPA requires Rx CSUM offloading */
8964 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
8965 if (!(dev->features & NETIF_F_LRO)) {
8966 dev->features |= NETIF_F_LRO;
8967 bp->flags |= TPA_ENABLE_FLAG;
8968 changed = 1;
8969 }
8970
8971 } else if (dev->features & NETIF_F_LRO) {
8972 dev->features &= ~NETIF_F_LRO;
8973 bp->flags &= ~TPA_ENABLE_FLAG;
8974 changed = 1;
8975 }
8976
8977 if (changed && netif_running(dev)) {
8978 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
8979 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
8980 }
8981
8982 return rc;
8983}
8984
8985static u32 bnx2x_get_rx_csum(struct net_device *dev)
8986{
8987 struct bnx2x *bp = netdev_priv(dev);
8988
8989 return bp->rx_csum;
8990}
8991
8992static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
8993{
8994 struct bnx2x *bp = netdev_priv(dev);
df0f2343 8995 int rc = 0;
8996
8997 bp->rx_csum = data;
8998
 8999 /* Disable TPA when Rx CSUM is disabled; otherwise all
 9000 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9001 if (!data) {
9002 u32 flags = ethtool_op_get_flags(dev);
9003
9004 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9005 }
9006
9007 return rc;
9008}
9009
9010static int bnx2x_set_tso(struct net_device *dev, u32 data)
9011{
755735eb 9012 if (data) {
a2fbb9ea 9013 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9014 dev->features |= NETIF_F_TSO6;
9015 } else {
a2fbb9ea 9016 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9017 dev->features &= ~NETIF_F_TSO6;
9018 }
9019
9020 return 0;
9021}
9022
f3c87cdd 9023static const struct {
9024 char string[ETH_GSTRING_LEN];
9025} bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9026 { "register_test (offline)" },
9027 { "memory_test (offline)" },
9028 { "loopback_test (offline)" },
9029 { "nvram_test (online)" },
9030 { "interrupt_test (online)" },
9031 { "link_test (online)" },
d3d4f495 9032 { "idle check (online)" }
9033};
9034
9035static int bnx2x_self_test_count(struct net_device *dev)
9036{
9037 return BNX2X_NUM_TESTS;
9038}
9039
9040static int bnx2x_test_registers(struct bnx2x *bp)
9041{
9042 int idx, i, rc = -ENODEV;
9043 u32 wr_val = 0;
9dabc424 9044 int port = BP_PORT(bp);
9045 static const struct {
9046 u32 offset0;
9047 u32 offset1;
9048 u32 mask;
9049 } reg_tbl[] = {
9050/* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9051 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9052 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9053 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9054 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9055 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9056 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9057 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9058 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9059 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9060/* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9061 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9062 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9063 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9064 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9065 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9066 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9067 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9068 { NIG_REG_EGRESS_MNG0_FIFO, 20, 0xffffffff },
9069 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9070/* 20 */ { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9071 { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9072 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9073 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9074 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9075 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9076 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9077 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9078 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9079 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9080/* 30 */ { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9081 { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9082 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9083 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9084 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9085 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9086 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9087 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9088
9089 { 0xffffffff, 0, 0x00000000 }
9090 };
9091
9092 if (!netif_running(bp->dev))
9093 return rc;
9094
 9095 /* Run the test twice:
 9096 first writing 0x00000000, then writing 0xffffffff */
9097 for (idx = 0; idx < 2; idx++) {
9098
9099 switch (idx) {
9100 case 0:
9101 wr_val = 0;
9102 break;
9103 case 1:
9104 wr_val = 0xffffffff;
9105 break;
9106 }
9107
9108 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9109 u32 offset, mask, save_val, val;
9110
9111 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9112 mask = reg_tbl[i].mask;
9113
9114 save_val = REG_RD(bp, offset);
9115
9116 REG_WR(bp, offset, wr_val);
9117 val = REG_RD(bp, offset);
9118
9119 /* Restore the original register's value */
9120 REG_WR(bp, offset, save_val);
9121
 9122 /* verify that the value is as expected */
9123 if ((val & mask) != (wr_val & mask))
9124 goto test_reg_exit;
9125 }
9126 }
9127
9128 rc = 0;
9129
9130test_reg_exit:
9131 return rc;
9132}
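/* Editor's sketch of the probe used in the loop above (hypothetical
 * helper): write a pattern, read it back through the mask, then restore
 * the saved value so the test leaves the register as it found it.
 */
static int bnx2x_probe_one_reg(struct bnx2x *bp, u32 offset, u32 mask,
			       u32 wr_val)
{
	u32 save_val = REG_RD(bp, offset);
	u32 val;

	REG_WR(bp, offset, wr_val);
	val = REG_RD(bp, offset);
	REG_WR(bp, offset, save_val);

	return ((val & mask) == (wr_val & mask)) ? 0 : -ENODEV;
}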
9133
9134static int bnx2x_test_memory(struct bnx2x *bp)
9135{
9136 int i, j, rc = -ENODEV;
9137 u32 val;
9138 static const struct {
9139 u32 offset;
9140 int size;
9141 } mem_tbl[] = {
9142 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9143 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9144 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9145 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9146 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9147 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9148 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9149
9150 { 0xffffffff, 0 }
9151 };
9152 static const struct {
9153 char *name;
9154 u32 offset;
9155 u32 e1_mask;
9156 u32 e1h_mask;
f3c87cdd 9157 } prty_tbl[] = {
9158 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9159 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9160 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9161 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9162 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9163 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9164
9165 { NULL, 0xffffffff, 0, 0 }
9166 };
9167
9168 if (!netif_running(bp->dev))
9169 return rc;
9170
9171 /* Go through all the memories */
9172 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9173 for (j = 0; j < mem_tbl[i].size; j++)
9174 REG_RD(bp, mem_tbl[i].offset + j*4);
9175
9176 /* Check the parity status */
9177 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9178 val = REG_RD(bp, prty_tbl[i].offset);
9179 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9180 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9181 DP(NETIF_MSG_HW,
9182 "%s is 0x%x\n", prty_tbl[i].name, val);
9183 goto test_mem_exit;
9184 }
9185 }
9186
9187 rc = 0;
9188
9189test_mem_exit:
9190 return rc;
9191}
9192
9193static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9194{
9195 int cnt = 1000;
9196
9197 if (link_up)
9198 while (bnx2x_link_test(bp) && cnt--)
9199 msleep(10);
9200}
9201
9202static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9203{
9204 unsigned int pkt_size, num_pkts, i;
9205 struct sk_buff *skb;
9206 unsigned char *packet;
9207 struct bnx2x_fastpath *fp = &bp->fp[0];
9208 u16 tx_start_idx, tx_idx;
9209 u16 rx_start_idx, rx_idx;
9210 u16 pkt_prod;
9211 struct sw_tx_bd *tx_buf;
9212 struct eth_tx_bd *tx_bd;
9213 dma_addr_t mapping;
9214 union eth_rx_cqe *cqe;
9215 u8 cqe_fp_flags;
9216 struct sw_rx_bd *rx_buf;
9217 u16 len;
9218 int rc = -ENODEV;
9219
9220 if (loopback_mode == BNX2X_MAC_LOOPBACK) {
9221 bp->link_params.loopback_mode = LOOPBACK_BMAC;
f3c87cdd 9222 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9223
9224 } else if (loopback_mode == BNX2X_PHY_LOOPBACK) {
3910c8ae 9225 u16 cnt = 1000;
f3c87cdd 9226 bp->link_params.loopback_mode = LOOPBACK_XGXS_10;
f3c87cdd 9227 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
f3c87cdd 9228 /* wait until link state is restored */
9229 if (link_up)
9230 while (cnt-- && bnx2x_test_link(&bp->link_params,
9231 &bp->link_vars))
9232 msleep(10);
9233 } else
9234 return -EINVAL;
9235
9236 pkt_size = 1514;
9237 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9238 if (!skb) {
9239 rc = -ENOMEM;
9240 goto test_loopback_exit;
9241 }
9242 packet = skb_put(skb, pkt_size);
9243 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9244 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9245 for (i = ETH_HLEN; i < pkt_size; i++)
9246 packet[i] = (unsigned char) (i & 0xff);
9247
9248 num_pkts = 0;
9249 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9250 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9251
9252 pkt_prod = fp->tx_pkt_prod++;
9253 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9254 tx_buf->first_bd = fp->tx_bd_prod;
9255 tx_buf->skb = skb;
9256
9257 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9258 mapping = pci_map_single(bp->pdev, skb->data,
9259 skb_headlen(skb), PCI_DMA_TODEVICE);
9260 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9261 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9262 tx_bd->nbd = cpu_to_le16(1);
9263 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9264 tx_bd->vlan = cpu_to_le16(pkt_prod);
9265 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9266 ETH_TX_BD_FLAGS_END_BD);
9267 tx_bd->general_data = ((UNICAST_ADDRESS <<
9268 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9269
9270 wmb();
9271
9272 fp->hw_tx_prods->bds_prod =
9273 cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + 1);
9274 mb(); /* FW restriction: must not reorder writing nbd and packets */
9275 fp->hw_tx_prods->packets_prod =
9276 cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
9277 DOORBELL(bp, FP_IDX(fp), 0);
9278
9279 mmiowb();
9280
9281 num_pkts++;
9282 fp->tx_bd_prod++;
9283 bp->dev->trans_start = jiffies;
9284
9285 udelay(100);
9286
9287 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9288 if (tx_idx != tx_start_idx + num_pkts)
9289 goto test_loopback_exit;
9290
9291 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9292 if (rx_idx != rx_start_idx + num_pkts)
9293 goto test_loopback_exit;
9294
9295 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9296 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9297 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9298 goto test_loopback_rx_exit;
9299
9300 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9301 if (len != pkt_size)
9302 goto test_loopback_rx_exit;
9303
9304 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9305 skb = rx_buf->skb;
9306 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9307 for (i = ETH_HLEN; i < pkt_size; i++)
9308 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9309 goto test_loopback_rx_exit;
9310
9311 rc = 0;
9312
9313test_loopback_rx_exit:
9314
9315 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9316 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9317 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9318 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9319
9320 /* Update producers */
9321 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9322 fp->rx_sge_prod);
9323
9324test_loopback_exit:
9325 bp->link_params.loopback_mode = LOOPBACK_NONE;
9326
9327 return rc;
9328}
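/* Editor's sketch (hypothetical helper): the loopback payload built above
 * is the byte pattern (i & 0xff) after the Ethernet header, so the
 * receive-side check is a single linear compare.
 */
static int bnx2x_check_lb_payload(const u8 *data, unsigned int pkt_size)
{
	unsigned int i;

	for (i = ETH_HLEN; i < pkt_size; i++)
		if (data[i] != (u8)(i & 0xff))
			return -EIO;

	return 0;
}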
9329
9330static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9331{
9332 int rc = 0;
9333
9334 if (!netif_running(bp->dev))
9335 return BNX2X_LOOPBACK_FAILED;
9336
f8ef6e44 9337 bnx2x_netif_stop(bp, 1);
3910c8ae 9338 bnx2x_acquire_phy_lock(bp);
9339
9340 if (bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up)) {
9341 DP(NETIF_MSG_PROBE, "MAC loopback failed\n");
9342 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9343 }
9344
9345 if (bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up)) {
9346 DP(NETIF_MSG_PROBE, "PHY loopback failed\n");
9347 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9348 }
9349
3910c8ae 9350 bnx2x_release_phy_lock(bp);
9351 bnx2x_netif_start(bp);
9352
9353 return rc;
9354}
9355
9356#define CRC32_RESIDUAL 0xdebb20e3
9357
9358static int bnx2x_test_nvram(struct bnx2x *bp)
9359{
9360 static const struct {
9361 int offset;
9362 int size;
9363 } nvram_tbl[] = {
9364 { 0, 0x14 }, /* bootstrap */
9365 { 0x14, 0xec }, /* dir */
9366 { 0x100, 0x350 }, /* manuf_info */
9367 { 0x450, 0xf0 }, /* feature_info */
9368 { 0x640, 0x64 }, /* upgrade_key_info */
9369 { 0x6a4, 0x64 },
9370 { 0x708, 0x70 }, /* manuf_key_info */
9371 { 0x778, 0x70 },
9372 { 0, 0 }
9373 };
9374 u32 buf[0x350 / 4];
9375 u8 *data = (u8 *)buf;
9376 int i, rc;
9377 u32 magic, csum;
9378
9379 rc = bnx2x_nvram_read(bp, 0, data, 4);
9380 if (rc) {
9381 DP(NETIF_MSG_PROBE, "magic value read (rc -%d)\n", -rc);
9382 goto test_nvram_exit;
9383 }
9384
9385 magic = be32_to_cpu(buf[0]);
9386 if (magic != 0x669955aa) {
9387 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9388 rc = -ENODEV;
9389 goto test_nvram_exit;
9390 }
9391
9392 for (i = 0; nvram_tbl[i].size; i++) {
9393
9394 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9395 nvram_tbl[i].size);
9396 if (rc) {
9397 DP(NETIF_MSG_PROBE,
9398 "nvram_tbl[%d] read data (rc -%d)\n", i, -rc);
9399 goto test_nvram_exit;
9400 }
9401
9402 csum = ether_crc_le(nvram_tbl[i].size, data);
9403 if (csum != CRC32_RESIDUAL) {
9404 DP(NETIF_MSG_PROBE,
9405 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9406 rc = -ENODEV;
9407 goto test_nvram_exit;
9408 }
9409 }
9410
9411test_nvram_exit:
9412 return rc;
9413}
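/* Editor's note, not in the original: each region in nvram_tbl[] is
 * stored with its little-endian CRC32 appended, and running the same CRC
 * over data-plus-checksum yields the constant residual 0xdebb20e3 for any
 * content.  That is why a single ether_crc_le() pass compared against
 * CRC32_RESIDUAL suffices - no need to locate and strip the stored CRC.
 */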
9414
9415static int bnx2x_test_intr(struct bnx2x *bp)
9416{
9417 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9418 int i, rc;
9419
9420 if (!netif_running(bp->dev))
9421 return -ENODEV;
9422
8d9c5f34 9423 config->hdr.length = 0;
9424 if (CHIP_IS_E1(bp))
9425 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9426 else
9427 config->hdr.offset = BP_FUNC(bp);
9428 config->hdr.client_id = BP_CL_ID(bp);
9429 config->hdr.reserved1 = 0;
9430
9431 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9432 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9433 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9434 if (rc == 0) {
9435 bp->set_mac_pending++;
9436 for (i = 0; i < 10; i++) {
9437 if (!bp->set_mac_pending)
9438 break;
9439 msleep_interruptible(10);
9440 }
9441 if (i == 10)
9442 rc = -ENODEV;
9443 }
9444
9445 return rc;
9446}
9447
9448static void bnx2x_self_test(struct net_device *dev,
9449 struct ethtool_test *etest, u64 *buf)
9450{
9451 struct bnx2x *bp = netdev_priv(dev);
9452
9453 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9454
f3c87cdd 9455 if (!netif_running(dev))
a2fbb9ea 9456 return;
a2fbb9ea 9457
33471629 9458 /* offline tests are not supported in MF mode */
9459 if (IS_E1HMF(bp))
9460 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9461
9462 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9463 u8 link_up;
9464
9465 link_up = bp->link_vars.link_up;
9466 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9467 bnx2x_nic_load(bp, LOAD_DIAG);
9468 /* wait until link state is restored */
9469 bnx2x_wait_for_link(bp, link_up);
9470
9471 if (bnx2x_test_registers(bp) != 0) {
9472 buf[0] = 1;
9473 etest->flags |= ETH_TEST_FL_FAILED;
9474 }
9475 if (bnx2x_test_memory(bp) != 0) {
9476 buf[1] = 1;
9477 etest->flags |= ETH_TEST_FL_FAILED;
9478 }
9479 buf[2] = bnx2x_test_loopback(bp, link_up);
9480 if (buf[2] != 0)
9481 etest->flags |= ETH_TEST_FL_FAILED;
a2fbb9ea 9482
9483 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9484 bnx2x_nic_load(bp, LOAD_NORMAL);
9485 /* wait until link state is restored */
9486 bnx2x_wait_for_link(bp, link_up);
9487 }
9488 if (bnx2x_test_nvram(bp) != 0) {
9489 buf[3] = 1;
9490 etest->flags |= ETH_TEST_FL_FAILED;
9491 }
9492 if (bnx2x_test_intr(bp) != 0) {
9493 buf[4] = 1;
9494 etest->flags |= ETH_TEST_FL_FAILED;
9495 }
9496 if (bp->port.pmf)
9497 if (bnx2x_link_test(bp) != 0) {
9498 buf[5] = 1;
9499 etest->flags |= ETH_TEST_FL_FAILED;
9500 }
9501
9502#ifdef BNX2X_EXTRA_DEBUG
9503 bnx2x_panic_dump(bp);
9504#endif
9505}
9506
9507static const struct {
9508 long offset;
9509 int size;
9510 u8 string[ETH_GSTRING_LEN];
9511} bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9512/* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9513 { Q_STATS_OFFSET32(error_bytes_received_hi),
9514 8, "[%d]: rx_error_bytes" },
9515 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9516 8, "[%d]: rx_ucast_packets" },
9517 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9518 8, "[%d]: rx_mcast_packets" },
9519 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9520 8, "[%d]: rx_bcast_packets" },
9521 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9522 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9523 4, "[%d]: rx_phy_ip_err_discards"},
9524 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9525 4, "[%d]: rx_skb_alloc_discard" },
9526 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9527
9528/* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9529 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9530 8, "[%d]: tx_packets" }
9531};
9532
9533static const struct {
9534 long offset;
9535 int size;
9536 u32 flags;
9537#define STATS_FLAGS_PORT 1
9538#define STATS_FLAGS_FUNC 2
de832a55 9539#define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
66e855f3 9540 u8 string[ETH_GSTRING_LEN];
bb2a0f7a 9541} bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9542/* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9543 8, STATS_FLAGS_BOTH, "rx_bytes" },
66e855f3 9544 { STATS_OFFSET32(error_bytes_received_hi),
de832a55 9545 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
bb2a0f7a 9546 { STATS_OFFSET32(total_unicast_packets_received_hi),
de832a55 9547 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
bb2a0f7a 9548 { STATS_OFFSET32(total_multicast_packets_received_hi),
de832a55 9549 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
bb2a0f7a 9550 { STATS_OFFSET32(total_broadcast_packets_received_hi),
de832a55 9551 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
bb2a0f7a 9552 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
66e855f3 9553 8, STATS_FLAGS_PORT, "rx_crc_errors" },
bb2a0f7a 9554 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
66e855f3 9555 8, STATS_FLAGS_PORT, "rx_align_errors" },
9556 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9557 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9558 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9559 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9560/* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9561 8, STATS_FLAGS_PORT, "rx_fragments" },
9562 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9563 8, STATS_FLAGS_PORT, "rx_jabbers" },
9564 { STATS_OFFSET32(no_buff_discard_hi),
9565 8, STATS_FLAGS_BOTH, "rx_discards" },
9566 { STATS_OFFSET32(mac_filter_discard),
9567 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9568 { STATS_OFFSET32(xxoverflow_discard),
9569 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9570 { STATS_OFFSET32(brb_drop_hi),
9571 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9572 { STATS_OFFSET32(brb_truncate_hi),
9573 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9574 { STATS_OFFSET32(pause_frames_received_hi),
9575 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9576 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9577 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9578 { STATS_OFFSET32(nig_timer_max),
9579 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9580/* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9581 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9582 { STATS_OFFSET32(rx_skb_alloc_failed),
9583 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9584 { STATS_OFFSET32(hw_csum_err),
9585 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9586
9587 { STATS_OFFSET32(total_bytes_transmitted_hi),
9588 8, STATS_FLAGS_BOTH, "tx_bytes" },
9589 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9590 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9591 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9592 8, STATS_FLAGS_BOTH, "tx_packets" },
9593 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9594 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9595 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9596 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
bb2a0f7a 9597 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
66e855f3 9598 8, STATS_FLAGS_PORT, "tx_single_collisions" },
bb2a0f7a 9599 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
66e855f3 9600 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
de832a55 9601/* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
66e855f3 9602 8, STATS_FLAGS_PORT, "tx_deferred" },
bb2a0f7a 9603 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
66e855f3 9604 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
bb2a0f7a 9605 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
66e855f3 9606 8, STATS_FLAGS_PORT, "tx_late_collisions" },
bb2a0f7a 9607 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
66e855f3 9608 8, STATS_FLAGS_PORT, "tx_total_collisions" },
bb2a0f7a 9609 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
66e855f3 9610 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
bb2a0f7a 9611 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
66e855f3 9612 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
bb2a0f7a 9613 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
66e855f3 9614 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
bb2a0f7a 9615 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
66e855f3 9616 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
bb2a0f7a 9617 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
66e855f3 9618 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
bb2a0f7a 9619 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
66e855f3 9620 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
de832a55 9621/* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
66e855f3 9622 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9623 { STATS_OFFSET32(pause_frames_sent_hi),
9624 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9625};
9626
9627#define IS_PORT_STAT(i) \
9628 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9629#define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9630#define IS_E1HMF_MODE_STAT(bp) \
9631 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
66e855f3 9632
9633static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9634{
bb2a0f7a 9635 struct bnx2x *bp = netdev_priv(dev);
de832a55 9636 int i, j, k;
bb2a0f7a 9637
9638 switch (stringset) {
9639 case ETH_SS_STATS:
9640 if (is_multi(bp)) {
9641 k = 0;
9642 for_each_queue(bp, i) {
9643 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
9644 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
9645 bnx2x_q_stats_arr[j].string, i);
9646 k += BNX2X_NUM_Q_STATS;
9647 }
9648 if (IS_E1HMF_MODE_STAT(bp))
9649 break;
9650 for (j = 0; j < BNX2X_NUM_STATS; j++)
9651 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
9652 bnx2x_stats_arr[j].string);
9653 } else {
9654 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9655 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9656 continue;
9657 strcpy(buf + j*ETH_GSTRING_LEN,
9658 bnx2x_stats_arr[i].string);
9659 j++;
9660 }
bb2a0f7a 9661 }
9662 break;
9663
9664 case ETH_SS_TEST:
9665 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
9666 break;
9667 }
9668}
9669
9670static int bnx2x_get_stats_count(struct net_device *dev)
9671{
bb2a0f7a 9672 struct bnx2x *bp = netdev_priv(dev);
de832a55 9673 int i, num_stats;
bb2a0f7a 9674
9675 if (is_multi(bp)) {
9676 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
9677 if (!IS_E1HMF_MODE_STAT(bp))
9678 num_stats += BNX2X_NUM_STATS;
9679 } else {
9680 if (IS_E1HMF_MODE_STAT(bp)) {
9681 num_stats = 0;
9682 for (i = 0; i < BNX2X_NUM_STATS; i++)
9683 if (IS_FUNC_STAT(i))
9684 num_stats++;
9685 } else
9686 num_stats = BNX2X_NUM_STATS;
bb2a0f7a 9687 }
de832a55 9688
bb2a0f7a 9689 return num_stats;
9690}
9691
9692static void bnx2x_get_ethtool_stats(struct net_device *dev,
9693 struct ethtool_stats *stats, u64 *buf)
9694{
9695 struct bnx2x *bp = netdev_priv(dev);
9696 u32 *hw_stats, *offset;
9697 int i, j, k;
bb2a0f7a 9698
9699 if (is_multi(bp)) {
9700 k = 0;
9701 for_each_queue(bp, i) {
9702 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
9703 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
9704 if (bnx2x_q_stats_arr[j].size == 0) {
9705 /* skip this counter */
9706 buf[k + j] = 0;
9707 continue;
9708 }
9709 offset = (hw_stats +
9710 bnx2x_q_stats_arr[j].offset);
9711 if (bnx2x_q_stats_arr[j].size == 4) {
9712 /* 4-byte counter */
9713 buf[k + j] = (u64) *offset;
9714 continue;
9715 }
9716 /* 8-byte counter */
9717 buf[k + j] = HILO_U64(*offset, *(offset + 1));
9718 }
9719 k += BNX2X_NUM_Q_STATS;
9720 }
9721 if (IS_E1HMF_MODE_STAT(bp))
9722 return;
9723 hw_stats = (u32 *)&bp->eth_stats;
9724 for (j = 0; j < BNX2X_NUM_STATS; j++) {
9725 if (bnx2x_stats_arr[j].size == 0) {
9726 /* skip this counter */
9727 buf[k + j] = 0;
9728 continue;
9729 }
9730 offset = (hw_stats + bnx2x_stats_arr[j].offset);
9731 if (bnx2x_stats_arr[j].size == 4) {
9732 /* 4-byte counter */
9733 buf[k + j] = (u64) *offset;
9734 continue;
9735 }
9736 /* 8-byte counter */
9737 buf[k + j] = HILO_U64(*offset, *(offset + 1));
a2fbb9ea 9738 }
9739 } else {
9740 hw_stats = (u32 *)&bp->eth_stats;
9741 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
9742 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
9743 continue;
9744 if (bnx2x_stats_arr[i].size == 0) {
9745 /* skip this counter */
9746 buf[j] = 0;
9747 j++;
9748 continue;
9749 }
9750 offset = (hw_stats + bnx2x_stats_arr[i].offset);
9751 if (bnx2x_stats_arr[i].size == 4) {
9752 /* 4-byte counter */
9753 buf[j] = (u64) *offset;
9754 j++;
9755 continue;
9756 }
9757 /* 8-byte counter */
9758 buf[j] = HILO_U64(*offset, *(offset + 1));
bb2a0f7a 9759 j++;
a2fbb9ea 9760 }
9761 }
9762}
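/* Editor's sketch (hypothetical helper): the 8-byte statistics above are
 * stored as two adjacent 32-bit words, high word first, which is exactly
 * what the HILO_U64() reassembly in the loops expresses.
 */
static inline u64 bnx2x_stat64(const u32 *hw_stats, long offset32)
{
	const u32 *p = hw_stats + offset32;

	return ((u64)p[0] << 32) | p[1];
}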
9763
9764static int bnx2x_phys_id(struct net_device *dev, u32 data)
9765{
9766 struct bnx2x *bp = netdev_priv(dev);
34f80b04 9767 int port = BP_PORT(bp);
9768 int i;
9769
9770 if (!netif_running(dev))
9771 return 0;
9772
9773 if (!bp->port.pmf)
9774 return 0;
9775
9776 if (data == 0)
9777 data = 2;
9778
9779 for (i = 0; i < (data * 2); i++) {
c18487ee 9780 if ((i % 2) == 0)
34f80b04 9781 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
9782 bp->link_params.hw_led_mode,
9783 bp->link_params.chip_id);
9784 else
34f80b04 9785 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
9786 bp->link_params.hw_led_mode,
9787 bp->link_params.chip_id);
9788
9789 msleep_interruptible(500);
9790 if (signal_pending(current))
9791 break;
9792 }
9793
c18487ee 9794 if (bp->link_vars.link_up)
34f80b04 9795 bnx2x_set_led(bp, port, LED_MODE_OPER,
9796 bp->link_vars.line_speed,
9797 bp->link_params.hw_led_mode,
9798 bp->link_params.chip_id);
9799
9800 return 0;
9801}
9802
9803static struct ethtool_ops bnx2x_ethtool_ops = {
9804 .get_settings = bnx2x_get_settings,
9805 .set_settings = bnx2x_set_settings,
9806 .get_drvinfo = bnx2x_get_drvinfo,
9807 .get_wol = bnx2x_get_wol,
9808 .set_wol = bnx2x_set_wol,
9809 .get_msglevel = bnx2x_get_msglevel,
9810 .set_msglevel = bnx2x_set_msglevel,
9811 .nway_reset = bnx2x_nway_reset,
9812 .get_link = ethtool_op_get_link,
9813 .get_eeprom_len = bnx2x_get_eeprom_len,
9814 .get_eeprom = bnx2x_get_eeprom,
9815 .set_eeprom = bnx2x_set_eeprom,
9816 .get_coalesce = bnx2x_get_coalesce,
9817 .set_coalesce = bnx2x_set_coalesce,
9818 .get_ringparam = bnx2x_get_ringparam,
9819 .set_ringparam = bnx2x_set_ringparam,
9820 .get_pauseparam = bnx2x_get_pauseparam,
9821 .set_pauseparam = bnx2x_set_pauseparam,
9822 .get_rx_csum = bnx2x_get_rx_csum,
9823 .set_rx_csum = bnx2x_set_rx_csum,
9824 .get_tx_csum = ethtool_op_get_tx_csum,
755735eb 9825 .set_tx_csum = ethtool_op_set_tx_hw_csum,
9826 .set_flags = bnx2x_set_flags,
9827 .get_flags = ethtool_op_get_flags,
9828 .get_sg = ethtool_op_get_sg,
9829 .set_sg = ethtool_op_set_sg,
9830 .get_tso = ethtool_op_get_tso,
9831 .set_tso = bnx2x_set_tso,
9832 .self_test_count = bnx2x_self_test_count,
9833 .self_test = bnx2x_self_test,
9834 .get_strings = bnx2x_get_strings,
9835 .phys_id = bnx2x_phys_id,
9836 .get_stats_count = bnx2x_get_stats_count,
bb2a0f7a 9837 .get_ethtool_stats = bnx2x_get_ethtool_stats,
9838};
9839
9840/* end of ethtool_ops */
9841
9842/****************************************************************************
9843* General service functions
9844****************************************************************************/
9845
9846static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
9847{
9848 u16 pmcsr;
9849
9850 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
9851
9852 switch (state) {
9853 case PCI_D0:
34f80b04 9854 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9855 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
9856 PCI_PM_CTRL_PME_STATUS));
9857
9858 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
33471629 9859 /* delay required during transition out of D3hot */
a2fbb9ea 9860 msleep(20);
34f80b04 9861 break;
a2fbb9ea 9862
9863 case PCI_D3hot:
9864 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
9865 pmcsr |= 3;
a2fbb9ea 9866
9867 if (bp->wol)
9868 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
a2fbb9ea 9869
9870 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
9871 pmcsr);
a2fbb9ea 9872
9873 /* No more memory access after this point until
9874 * device is brought back to D0.
9875 */
9876 break;
9877
9878 default:
9879 return -EINVAL;
9880 }
9881 return 0;
9882}
9883
9884static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
9885{
9886 u16 rx_cons_sb;
9887
9888 /* Tell compiler that status block fields can change */
9889 barrier();
9890 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
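	/* Editor's comment, inferred from the ring macros: the last entry of
	 * each RCQ page is a "next page" pointer rather than a real
	 * completion, so the consumer index is stepped past it below.
	 */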
9891 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
9892 rx_cons_sb++;
9893 return (fp->rx_comp_cons != rx_cons_sb);
9894}
9895
9896/*
9897 * net_device service functions
9898 */
9899
9900static int bnx2x_poll(struct napi_struct *napi, int budget)
9901{
9902 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
9903 napi);
9904 struct bnx2x *bp = fp->bp;
9905 int work_done = 0;
9906
9907#ifdef BNX2X_STOP_ON_ERROR
9908 if (unlikely(bp->panic))
34f80b04 9909 goto poll_panic;
9910#endif
9911
9912 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
9913 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
9914 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
9915
9916 bnx2x_update_fpsb_idx(fp);
9917
237907c1 9918 if (bnx2x_has_tx_work(fp))
9919 bnx2x_tx_int(fp, budget);
9920
237907c1 9921 if (bnx2x_has_rx_work(fp))
a2fbb9ea 9922 work_done = bnx2x_rx_int(fp, budget);
da5a662a 9923 rmb(); /* BNX2X_HAS_WORK() reads the status block */
9924
9925 /* must not complete if we consumed full budget */
da5a662a 9926 if ((work_done < budget) && !BNX2X_HAS_WORK(fp)) {
9927
9928#ifdef BNX2X_STOP_ON_ERROR
34f80b04 9929poll_panic:
a2fbb9ea 9930#endif
288379f0 9931 napi_complete(napi);
a2fbb9ea 9932
34f80b04 9933 bnx2x_ack_sb(bp, FP_SB_ID(fp), USTORM_ID,
a2fbb9ea 9934 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
34f80b04 9935 bnx2x_ack_sb(bp, FP_SB_ID(fp), CSTORM_ID,
9936 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
9937 }
9938 return work_done;
9939}
9940
9941
9942/* we split the first BD into headers and data BDs
33471629 9943 * to ease the pain of our fellow microcode engineers
9944 * we use one mapping for both BDs
9945 * So far this has only been observed to happen
9946 * in Other Operating Systems(TM)
9947 */
9948static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
9949 struct bnx2x_fastpath *fp,
9950 struct eth_tx_bd **tx_bd, u16 hlen,
9951 u16 bd_prod, int nbd)
9952{
9953 struct eth_tx_bd *h_tx_bd = *tx_bd;
9954 struct eth_tx_bd *d_tx_bd;
9955 dma_addr_t mapping;
9956 int old_len = le16_to_cpu(h_tx_bd->nbytes);
9957
9958 /* first fix first BD */
9959 h_tx_bd->nbd = cpu_to_le16(nbd);
9960 h_tx_bd->nbytes = cpu_to_le16(hlen);
9961
9962 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
9963 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
9964 h_tx_bd->addr_lo, h_tx_bd->nbd);
9965
9966 /* now get a new data BD
9967 * (after the pbd) and fill it */
9968 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9969 d_tx_bd = &fp->tx_desc_ring[bd_prod];
9970
9971 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
9972 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
9973
9974 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9975 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9976 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
9977 d_tx_bd->vlan = 0;
9978 /* this marks the BD as one that has no individual mapping
9979 * the FW ignores this flag in a BD not marked start
9980 */
9981 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
9982 DP(NETIF_MSG_TX_QUEUED,
9983 "TSO split data size is %d (%x:%x)\n",
9984 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
9985
9986 /* update tx_bd for marking the last BD flag */
9987 *tx_bd = d_tx_bd;
9988
9989 return bd_prod;
9990}
9991
9992static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
9993{
9994 if (fix > 0)
9995 csum = (u16) ~csum_fold(csum_sub(csum,
9996 csum_partial(t_header - fix, fix, 0)));
9997
9998 else if (fix < 0)
9999 csum = (u16) ~csum_fold(csum_add(csum,
10000 csum_partial(t_header, -fix, 0)));
10001
10002 return swab16(csum);
10003}
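/* Editor's example (hypothetical values): with fix == 2 the stack began
 * its partial checksum two bytes before the transport header, so those
 * two bytes are checksummed and subtracted back out; with fix == 0 the
 * function reduces to swab16() for the hardware's byte order.
 */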
10004
10005static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10006{
10007 u32 rc;
10008
10009 if (skb->ip_summed != CHECKSUM_PARTIAL)
10010 rc = XMIT_PLAIN;
10011
10012 else {
10013 if (skb->protocol == ntohs(ETH_P_IPV6)) {
10014 rc = XMIT_CSUM_V6;
10015 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10016 rc |= XMIT_CSUM_TCP;
10017
10018 } else {
10019 rc = XMIT_CSUM_V4;
10020 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10021 rc |= XMIT_CSUM_TCP;
10022 }
10023 }
10024
10025 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10026 rc |= XMIT_GSO_V4;
10027
10028 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10029 rc |= XMIT_GSO_V6;
10030
10031 return rc;
10032}
10033
632da4d6 10034#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10035/* check if packet requires linearization (packet is too fragmented) */
10036static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10037 u32 xmit_type)
10038{
10039 int to_copy = 0;
10040 int hlen = 0;
10041 int first_bd_sz = 0;
10042
10043 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10044 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10045
10046 if (xmit_type & XMIT_GSO) {
10047 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10048 /* Check if LSO packet needs to be copied:
10049 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10050 int wnd_size = MAX_FETCH_BD - 3;
33471629 10051 /* Number of windows to check */
10052 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10053 int wnd_idx = 0;
10054 int frag_idx = 0;
10055 u32 wnd_sum = 0;
10056
10057 /* Headers length */
10058 hlen = (int)(skb_transport_header(skb) - skb->data) +
10059 tcp_hdrlen(skb);
10060
 10061 /* Amount of data (w/o headers) on the linear part of the SKB */
10062 first_bd_sz = skb_headlen(skb) - hlen;
10063
10064 wnd_sum = first_bd_sz;
10065
10066 /* Calculate the first sum - it's special */
10067 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10068 wnd_sum +=
10069 skb_shinfo(skb)->frags[frag_idx].size;
10070
10071 /* If there was data on linear skb data - check it */
10072 if (first_bd_sz > 0) {
10073 if (unlikely(wnd_sum < lso_mss)) {
10074 to_copy = 1;
10075 goto exit_lbl;
10076 }
10077
10078 wnd_sum -= first_bd_sz;
10079 }
10080
10081 /* Others are easier: run through the frag list and
10082 check all windows */
10083 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10084 wnd_sum +=
10085 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10086
10087 if (unlikely(wnd_sum < lso_mss)) {
10088 to_copy = 1;
10089 break;
10090 }
10091 wnd_sum -=
10092 skb_shinfo(skb)->frags[wnd_idx].size;
10093 }
10094
10095 } else {
 10096 /* a non-LSO packet that is too fragmented
 10097 should always be linearized */
10098 to_copy = 1;
10099 }
10100 }
10101
10102exit_lbl:
10103 if (unlikely(to_copy))
10104 DP(NETIF_MSG_TX_QUEUED,
10105 "Linearization IS REQUIRED for %s packet. "
10106 "num_frags %d hlen %d first_bd_sz %d\n",
10107 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10108 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10109
10110 return to_copy;
10111}
632da4d6 10112#endif
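/* Editor's restatement of the window check above as a self-contained,
 * hypothetical helper: the FW can fetch at most wnd_size BDs per LSO
 * pass, so every run of wnd_size consecutive fragments must cover at
 * least one MSS; otherwise the skb has to be linearized first.
 */
static int lso_window_too_small(const unsigned int *frag_len, int nfrags,
				int wnd_size, unsigned int lso_mss)
{
	unsigned int wnd_sum = 0;
	int i;

	for (i = 0; i < nfrags; i++) {
		wnd_sum += frag_len[i];
		if (i >= wnd_size)
			wnd_sum -= frag_len[i - wnd_size];
		if (i >= wnd_size - 1 && wnd_sum < lso_mss)
			return 1;	/* caller should skb_linearize() */
	}

	return 0;
}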
10113
10114/* called with netif_tx_lock
a2fbb9ea 10115 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
755735eb 10116 * netif_wake_queue()
10117 */
10118static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10119{
10120 struct bnx2x *bp = netdev_priv(dev);
10121 struct bnx2x_fastpath *fp;
555f6c78 10122 struct netdev_queue *txq;
10123 struct sw_tx_bd *tx_buf;
10124 struct eth_tx_bd *tx_bd;
10125 struct eth_tx_parse_bd *pbd = NULL;
10126 u16 pkt_prod, bd_prod;
755735eb 10127 int nbd, fp_index;
a2fbb9ea 10128 dma_addr_t mapping;
10129 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10130 int vlan_off = (bp->e1hov ? 4 : 0);
10131 int i;
10132 u8 hlen = 0;
10133
10134#ifdef BNX2X_STOP_ON_ERROR
10135 if (unlikely(bp->panic))
10136 return NETDEV_TX_BUSY;
10137#endif
10138
10139 fp_index = skb_get_queue_mapping(skb);
10140 txq = netdev_get_tx_queue(dev, fp_index);
10141
a2fbb9ea 10142 fp = &bp->fp[fp_index];
755735eb 10143
231fd58a 10144 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
de832a55 10145 fp->eth_q_stats.driver_xoff++;
555f6c78 10146 netif_tx_stop_queue(txq);
10147 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10148 return NETDEV_TX_BUSY;
10149 }
10150
10151 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10152 " gso type %x xmit_type %x\n",
10153 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10154 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10155
632da4d6 10156#if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
33471629 10157 /* First, check if we need to linearize the skb
10158 (due to FW restrictions) */
10159 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10160 /* Statistics of linearization */
10161 bp->lin_cnt++;
10162 if (skb_linearize(skb) != 0) {
10163 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10164 "silently dropping this SKB\n");
10165 dev_kfree_skb_any(skb);
da5a662a 10166 return NETDEV_TX_OK;
10167 }
10168 }
632da4d6 10169#endif
755735eb 10170
a2fbb9ea 10171 /*
755735eb 10172 Please read carefully. First we use one BD which we mark as start,
a2fbb9ea 10173 then for TSO or xsum we have a parsing info BD,
755735eb 10174 and only then we have the rest of the TSO BDs.
10175 (don't forget to mark the last one as last,
10176 and to unmap only AFTER you write to the BD ...)
755735eb 10177 And above all, all PBD sizes are in words - NOT DWORDS!
10178 */
10179
10180 pkt_prod = fp->tx_pkt_prod++;
755735eb 10181 bd_prod = TX_BD(fp->tx_bd_prod);
a2fbb9ea 10182
755735eb 10183 /* get a tx_buf and first BD */
10184 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10185 tx_bd = &fp->tx_desc_ring[bd_prod];
10186
10187 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10188 tx_bd->general_data = (UNICAST_ADDRESS <<
10189 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10190 /* header nbd */
10191 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
a2fbb9ea 10192
10193 /* remember the first BD of the packet */
10194 tx_buf->first_bd = fp->tx_bd_prod;
10195 tx_buf->skb = skb;
10196
10197 DP(NETIF_MSG_TX_QUEUED,
10198 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10199 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10200
10201#ifdef BCM_VLAN
10202 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10203 (bp->flags & HW_VLAN_TX_FLAG)) {
10204 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10205 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10206 vlan_off += 4;
10207 } else
0c6671b0 10208#endif
755735eb 10209 tx_bd->vlan = cpu_to_le16(pkt_prod);
a2fbb9ea 10210
755735eb 10211 if (xmit_type) {
755735eb 10212 /* turn on parsing and get a BD */
a2fbb9ea
ET
10213 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10214 pbd = (void *)&fp->tx_desc_ring[bd_prod];
755735eb
EG
10215
10216 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10217 }
10218
10219 if (xmit_type & XMIT_CSUM) {
10220 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
a2fbb9ea
ET
10221
10222 /* for now NS flag is not used in Linux */
755735eb 10223 pbd->global_data = (hlen |
96fc1784 10224 ((skb->protocol == ntohs(ETH_P_8021Q)) <<
a2fbb9ea 10225 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
a2fbb9ea 10226
755735eb
EG
10227 pbd->ip_hlen = (skb_transport_header(skb) -
10228 skb_network_header(skb)) / 2;
10229
10230 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
a2fbb9ea 10231
755735eb
EG
10232 pbd->total_hlen = cpu_to_le16(hlen);
10233 hlen = hlen*2 - vlan_off;
a2fbb9ea 10234
755735eb
EG
10235 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10236
10237 if (xmit_type & XMIT_CSUM_V4)
a2fbb9ea 10238 tx_bd->bd_flags.as_bitfield |=
755735eb
EG
10239 ETH_TX_BD_FLAGS_IP_CSUM;
10240 else
10241 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10242
10243 if (xmit_type & XMIT_CSUM_TCP) {
10244 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10245
10246 } else {
10247 s8 fix = SKB_CS_OFF(skb); /* signed! */
10248
a2fbb9ea 10249 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
755735eb 10250 pbd->cs_offset = fix / 2;
a2fbb9ea 10251
755735eb
EG
10252 DP(NETIF_MSG_TX_QUEUED,
10253 "hlen %d offset %d fix %d csum before fix %x\n",
10254 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10255 SKB_CS(skb));
10256
10257 /* HW bug: fixup the CSUM */
10258 pbd->tcp_pseudo_csum =
10259 bnx2x_csum_fix(skb_transport_header(skb),
10260 SKB_CS(skb), fix);
10261
10262 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10263 pbd->tcp_pseudo_csum);
10264 }
a2fbb9ea
ET
10265 }
10266
10267 mapping = pci_map_single(bp->pdev, skb->data,
755735eb 10268 skb_headlen(skb), PCI_DMA_TODEVICE);
a2fbb9ea
ET
10269
10270 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10271 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
6378c025 10272 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
a2fbb9ea
ET
10273 tx_bd->nbd = cpu_to_le16(nbd);
10274 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10275
10276 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
755735eb
EG
10277 " nbytes %d flags %x vlan %x\n",
10278 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10279 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10280 le16_to_cpu(tx_bd->vlan));
a2fbb9ea 10281
755735eb 10282 if (xmit_type & XMIT_GSO) {
a2fbb9ea
ET
10283
10284 DP(NETIF_MSG_TX_QUEUED,
10285 "TSO packet len %d hlen %d total len %d tso size %d\n",
10286 skb->len, hlen, skb_headlen(skb),
10287 skb_shinfo(skb)->gso_size);
10288
10289 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10290
755735eb
EG
10291 if (unlikely(skb_headlen(skb) > hlen))
10292 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10293 bd_prod, ++nbd);
a2fbb9ea
ET
10294
10295 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10296 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
755735eb
EG
10297 pbd->tcp_flags = pbd_tcp_flags(skb);
10298
10299 if (xmit_type & XMIT_GSO_V4) {
10300 pbd->ip_id = swab16(ip_hdr(skb)->id);
10301 pbd->tcp_pseudo_csum =
a2fbb9ea
ET
10302 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10303 ip_hdr(skb)->daddr,
10304 0, IPPROTO_TCP, 0));
755735eb
EG
10305
10306 } else
10307 pbd->tcp_pseudo_csum =
10308 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10309 &ipv6_hdr(skb)->daddr,
10310 0, IPPROTO_TCP, 0));
10311
a2fbb9ea
ET
10312 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10313 }
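
	/* Note: the pseudo checksums above are seeded with len == 0
	 * (the 0 before IPPROTO_TCP), matching the _PSEUDO_CS_WITHOUT_LEN
	 * flag; presumably the chip folds in the real length of each
	 * segment it generates.
	 */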

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

		bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
		tx_bd = &fp->tx_desc_ring[bd_prod];

		mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
				       frag->size, PCI_DMA_TODEVICE);

		tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
		tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
		tx_bd->nbytes = cpu_to_le16(frag->size);
		tx_bd->vlan = cpu_to_le16(pkt_prod);
		tx_bd->bd_flags.as_bitfield = 0;

		DP(NETIF_MSG_TX_QUEUED,
		   "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
		   i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
		   le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
	}

	/* now at last mark the BD as the last BD */
	tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;

	DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
	   tx_bd, tx_bd->bd_flags.as_bitfield);

	bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));

	/* now send a tx doorbell, counting the next BD
	 * if the packet contains or ends with it
	 */
	if (TX_BD_POFF(bd_prod) < nbd)
		nbd++;
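
	/* Worked example: if the packet used nbd = 4 BDs but the advanced
	 * bd_prod now sits at page offset 2, the chain must have crossed a
	 * BD page boundary and consumed the "next page" pointer BD, so that
	 * BD is counted here as well.
	 */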

	if (pbd)
		DP(NETIF_MSG_TX_QUEUED,
		   "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
		   " tcp_flags %x xsum %x seq %u hlen %u\n",
		   pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
		   pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
		   pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));

	DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);

	/*
	 * Make sure that the BD data is updated before updating the producer
	 * since FW might read the BD right after the producer is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes packets must have BDs.
	 */
	wmb();

	fp->hw_tx_prods->bds_prod =
		cpu_to_le16(le16_to_cpu(fp->hw_tx_prods->bds_prod) + nbd);
	mb(); /* FW restriction: must not reorder writing nbd and packets */
	fp->hw_tx_prods->packets_prod =
		cpu_to_le32(le32_to_cpu(fp->hw_tx_prods->packets_prod) + 1);
	DOORBELL(bp, FP_IDX(fp), 0);

	mmiowb();

	fp->tx_bd_prod += nbd;
	dev->trans_start = jiffies;

	if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
		/* We want bnx2x_tx_int to "see" the updated tx_bd_prod
		   if we put Tx into XOFF state. */
		smp_mb();
		netif_tx_stop_queue(txq);
		fp->eth_q_stats.driver_xoff++;
		if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
			netif_tx_wake_queue(txq);
	}
	fp->tx_pkt++;

	return NETDEV_TX_OK;
}
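
/* A worked BD count for one concrete case: a TSO skb with two page frags
 * whose linear part is longer than its headers takes nbd = 2 (frags) +
 * 2 (start BD + parsing BD) = 4, the ++nbd at the bnx2x_tx_split() call
 * makes it 5, and a BD page crossing can add one more before the doorbell
 * is rung.
 */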

/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from set_multicast */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
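
/* Illustrative sketch of the E1H multicast hashing above; this helper is
 * hypothetical and not called by the driver, it only restates how a MAC
 * address is reduced to a single bit in the 8 x 32-bit MC_HASH registers.
 */
static inline void bnx2x_mc_hash_bit_sketch(const u8 *mac, u32 *mc_filter)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);	/* CRC32c over all 6 bytes */
	u32 bit = (crc >> 24) & 0xff;	/* top CRC byte picks 1 of 256 bits */
	u32 regidx = bit >> 5;		/* which 32-bit register (0..7) */

	mc_filter[regidx] |= (1 << (bit & 0x1f));	/* bit within it */
}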

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_64BIT_MASK) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_64BIT_MASK) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_32BIT_MASK) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
#endif
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}
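
/* Usage note: the two helpers above feed the probe banner in
 * bnx2x_init_one() below; e.g. a x8 Gen2 link (width 8, speed code 2)
 * is reported as "PCI-E x8 5GHz (Gen2)".
 */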

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);
	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it is OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset	= bnx2x_io_slot_reset,
	.resume		= bnx2x_io_resume,
};
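
/* Recovery flow: on a PCI channel failure the EEH core invokes
 * bnx2x_io_error_detected() first, bnx2x_io_slot_reset() once the slot
 * has been reset, and finally bnx2x_io_resume() when traffic may restart,
 * matching the three callbacks wired up above.
 */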

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	return pci_register_driver(&bnx2x_pci_driver);
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);